hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---
5e9095e4dedd8b88ade554da76518a4349b9b330.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
#include <fstream>
#include <time.h>
using namespace std;
#define TILE 25
__global__ void
CUDAedge( int *a, const int b)
{
int pos = threadIdx.x;
int min[9] = { a[pos], a[pos+1], a[pos-1], a[pos+b], a[pos+b+1], a[pos+b-1], a[pos-b], a[pos-b+1], a[pos-b-1] };
int val=0;
for (int i=0;i<9;i++)
{
if (min[i]>val)
val=min[i];
}
a[pos]=val;
for (int i=0;i<9;i++)
{
if (min[i]<val)
val=min[i];
}
a[pos]=val;
}
int main ()
{
ifstream fin;
fin.open("input.in");
ofstream fout;
fout.open("output.out");
int i,h,b,cs;
double total=0,total1=0;
fin>>cs;
for (int k=1;k<=cs;k++)
{
clock_t st = clock();
fin>>h>>b;
int *ha,*da;
int *r=new int[h*b];
ha=new int[h*b];
for ( i = 0 ; i<h*b ; i++ )
fin>>ha[i];
hipMalloc((void **) &da, h*b*sizeof (int));
hipMemcpy(da,ha,h*b*sizeof(int),hipMemcpyHostToDevice);
clock_t st1 = clock();
hipLaunchKernelGGL(CUDAedge, dim3(1), dim3(h*b), 0, 0, da, b);
hipDeviceSynchronize();
hipMemcpy(r,da,h*b*sizeof(int),hipMemcpyDeviceToHost); // copy the result back before the device buffer is freed
delete[] ha; // ha was allocated with new[], so release it with delete[] rather than hipHostFree
hipFree(da);
clock_t et = clock();
double t=double(et - st)/CLOCKS_PER_SEC;
double t1=double(et - st1)/CLOCKS_PER_SEC;
total+=t;
total1+=t1;
fout<<k<<'\t'<<h<<'\t'<<b<<'\t'<<h*b<<'\t'<<t<<'\t'<<t1<<endl;
delete[] r; // release the per-case result buffer
}
//fout<<total<<'\t'<<total1;
fin.close();
fout.close();
}
|
5e9095e4dedd8b88ade554da76518a4349b9b330.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
#include <fstream>
#include <time.h>
using namespace std;
#define TILE 25
__global__ void
CUDAedge( int *a, const int b)
{
int pos = threadIdx.x;
int min[9] = { a[pos], a[pos+1], a[pos-1], a[pos+b], a[pos+b+1], a[pos+b-1], a[pos-b], a[pos-b+1], a[pos-b-1] };
int val=0;
for (int i=0;i<9;i++)
{
if (min[i]>val)
val=min[i];
}
a[pos]=val;
for (int i=0;i<9;i++)
{
if (min[i]<val)
val=min[i];
}
a[pos]=val;
}
int main ()
{
ifstream fin;
fin.open("input.in");
ofstream fout;
fout.open("output.out");
int i,h,b,cs;
double total=0,total1=0;
fin>>cs;
for (int k=1;k<=cs;k++)
{
clock_t st = clock();
fin>>h>>b;
int *ha,*da;
int *r=new int[h*b];
ha=new int[h*b];
for ( i = 0 ; i<h*b ; i++ )
fin>>ha[i];
cudaMalloc((void **) &da, h*b*sizeof (int));
cudaMemcpy(da,ha,h*b*sizeof(int),cudaMemcpyHostToDevice);
clock_t st1 = clock();
CUDAedge <<<1, h*b>>> (da, b);
cudaDeviceSynchronize();
cudaMemcpy(r,da,h*b*sizeof(int),cudaMemcpyDeviceToHost); // copy the result back before the device buffer is freed
delete[] ha; // ha was allocated with new[], so release it with delete[] rather than cudaFreeHost
cudaFree(da);
clock_t et = clock();
double t=double(et - st)/CLOCKS_PER_SEC;
double t1=double(et - st1)/CLOCKS_PER_SEC;
total+=t;
total1+=t1;
fout<<k<<'\t'<<h<<'\t'<<b<<'\t'<<h*b<<'\t'<<t<<'\t'<<t1<<endl;
delete[] r; // release the per-case result buffer
}
//fout<<total<<'\t'<<total1;
fin.close();
fout.close();
}
|
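The pair above shows the main syntactic rewrite hipify applies to kernel launches: CUDA's triple-chevron launch becomes a call to hipLaunchKernelGGL with explicit grid/block dimensions, dynamic shared-memory size, and stream. A minimal sketch of that mapping, using a hypothetical kernel named scale (not taken from the files above):
#include "hip/hip_runtime.h"
__global__ void scale(int *data, int factor) {
    data[threadIdx.x] *= factor;  // one element per thread, as in CUDAedge above
}
void launch_scale(int *d_data, int n, int factor) {
    // CUDA form (as in the .cu file):   scale<<<1, n>>>(d_data, factor);
    // HIP form produced by hipify (as in the .hip file):
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, d_data, factor);
    hipDeviceSynchronize();  // wait for the kernel before using the results
}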
2cf881f9892ebca689743c87cdc801fc4cab6224.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/layers/loss/entrywise.hpp"
#include "lbann/utils/cuda.hpp"
namespace lbann {
namespace {
/** CUDA kernel to apply a binary backprop operator. */
template <typename BinaryBackPropOperator>
__global__
void binary_backprop_operator_kernel(El::Int height, El::Int width,
const DataType* __restrict__ x1,
El::Int x1_ldim,
const DataType* __restrict__ x2,
El::Int x2_ldim,
const DataType* __restrict__ dy,
El::Int dy_ldim,
DataType* __restrict__ dx1,
El::Int dx1_ldim,
DataType* __restrict__ dx2,
El::Int dx2_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int size = height * width;
const El::Int num_threads = blockDim.x * gridDim.x;
BinaryBackPropOperator op;
for (El::Int pos = gid; pos < size; pos += num_threads) {
const auto& row = pos % height;
const auto& col = pos / height;
op(x1[row + col * x1_ldim],
x2[row + col * x2_ldim],
dy[row + col * dy_ldim],
dx1[row + col * dx1_ldim],
dx2[row + col * dx2_ldim]);
}
}
/** Apply a binary backprop operator to GPU data.
 * The input and output data must be on GPU and must have the same
* dimensions. Given a binary function \f$ y = f(x_1,x_2) \f$, the
* corresponding BinaryBackPropOperator is a 5-ary function with the
* arguments \f$ x_1 \f$, \f$ x_2 \f$, \f$ dL/dy \f$, \f$ dL/dx_1\f$,
* \f$ dL/dx_2 \f$. The last two arguments should be overwritten when
* the BinaryBackPropOperator is called.
*/
template <typename BinaryBackPropOperator>
void apply_binary_backprop_operator(const AbsMat& x1,
const AbsMat& x2,
const AbsMat& dy,
AbsMat& dx1,
AbsMat& dx2) {
// Get CUDA grid dimensions
// Note: Maximum CUDA grid dimension is 2^32-1
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
const El::Int height = x1.Height();
const El::Int width = x1.Width();
const El::Int block_dim = 256;
El::Int grid_dim = (height * width + block_dim - 1) / block_dim;
if (sizeof(El::Int) > sizeof(unsigned int)
&& grid_dim > std::numeric_limits<uint32_t>::max()) {
grid_dim = std::numeric_limits<uint32_t>::max();
}
// Launch CUDA kernel
if (grid_dim > 0) {
CHECK_CUDA(hipSetDevice(El::GPUManager::Device()));
hipLaunchKernelGGL(( binary_backprop_operator_kernel<BinaryBackPropOperator>)
, dim3(grid_dim), dim3(block_dim), 0, El::GPUManager::Stream(),
height, width,
x1.LockedBuffer(), x1.LDim(),
x2.LockedBuffer(), x2.LDim(),
dy.LockedBuffer(), dy.LDim(),
dx1.Buffer(), dx1.LDim(),
dx2.Buffer(), dx2.LDim());
}
}
// =========================================================
// Operator objects for entry-wise binary layers
// =========================================================
// Note: Binary operator corresponds to forward prop step
// (\f$ y = f(x_1,x_2) \f$) and 5-ary operator corresponds
// to back prop step
// (\f$ \frac{dL}{dx_i} = \frac{dL}{dy} \frac{df}{dx_i}(x_1,x_2) \f$).
/** Binary cross entropy operator. */
struct binary_cross_entropy_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
DataType y = zero;
if (x2 > zero) { y += -x2 * cuda::log(x1); }
if (x2 < one) { y += -(one-x2) * cuda::log(one-x1); }
return y;
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
dx1 = zero;
dx2 = zero;
if (dy == zero) { return; }
if (x2 > zero) {
dx1 += -x2 / x1 * dy;
dx2 += -cuda::log(x1) * dy;
}
if (x2 < one) {
dx1 += (one-x2) / (one-x1) * dy;
dx2 += cuda::log(one-x1) * dy;
}
}
};
/** Sigmoid binary cross entropy operator.
* Equivalent to applying a sigmoid function to the first operand and
* then computing the binary cross entropy. Numerically stable
* implementation is taken from
* https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits.
*/
struct sigmoid_binary_cross_entropy_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
const auto& z = cuda::max(zero, cuda::min(x2, one));
if (x1 > zero) {
return (one - z) * x1 + cuda::log1p(cuda::exp(-x1));
} else {
return - x1 * z + cuda::log1p(cuda::exp(x1));
}
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
const auto& z = cuda::max(zero, cuda::min(x2, one));
if (x1 > zero) {
dx1 = -z + 1 / (one + cuda::exp(-x1));
} else {
dx1 = one - z - 1 / (one + cuda::exp(x1));
}
dx1 *= dy;
dx2 = (x2 == z) ? -x1 * dy : zero;
}
};
/** Boolean accuracy operator. */
struct boolean_accuracy_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
const auto& b1 = x1 >= DataType(0.5);
const auto& b2 = x2 >= DataType(0.5);
return b1 == b2 ? DataType(1) : DataType(0);
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
dx1 = DataType(0);
dx2 = DataType(0);
}
};
/** Boolean false negative operator. */
struct boolean_false_negative_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
const auto& b1 = x1 >= DataType(0.5);
const auto& b2 = x2 >= DataType(0.5);
return (!b1 && b2) ? DataType(1) : DataType(0);
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
dx1 = DataType(0);
dx2 = DataType(0);
}
};
/** Boolean false positive operator. */
struct boolean_false_positive_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
const auto& b1 = x1 >= DataType(0.5);
const auto& b2 = x2 >= DataType(0.5);
return (b1 && !b2) ? DataType(1) : DataType(0);
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
dx1 = DataType(0);
dx2 = DataType(0);
}
};
} // namespace
// Template instantiation
#define INSTANTIATE(layer, op) \
template <> \
void layer<data_layout::MODEL_PARALLEL, El::Device::GPU> \
::fp_compute() { \
cuda::apply_entrywise_binary_operator<op>(get_prev_activations(0), \
get_prev_activations(1), \
get_activations()); \
} \
template <> \
void layer<data_layout::MODEL_PARALLEL, El::Device::GPU> \
::bp_compute() { \
apply_binary_backprop_operator<op>(get_local_prev_activations(0), \
get_local_prev_activations(1), \
get_local_prev_error_signals(), \
get_local_error_signals(0), \
get_local_error_signals(1)); \
} \
template <> \
void layer<data_layout::DATA_PARALLEL, El::Device::GPU> \
::fp_compute() { \
cuda::apply_entrywise_binary_operator<op>(get_prev_activations(0), \
get_prev_activations(1), \
get_activations()); \
} \
template <> \
void layer<data_layout::DATA_PARALLEL, El::Device::GPU> \
::bp_compute() { \
apply_binary_backprop_operator<op>(get_local_prev_activations(0), \
get_local_prev_activations(1), \
get_local_prev_error_signals(), \
get_local_error_signals(0), \
get_local_error_signals(1)); \
}
INSTANTIATE(binary_cross_entropy_layer, binary_cross_entropy_op)
INSTANTIATE(sigmoid_binary_cross_entropy_layer, sigmoid_binary_cross_entropy_op)
INSTANTIATE(boolean_accuracy_layer, boolean_accuracy_op)
INSTANTIATE(boolean_false_negative_layer, boolean_false_negative_op)
INSTANTIATE(boolean_false_positive_layer, boolean_false_positive_op)
} // namespace lbann
|
2cf881f9892ebca689743c87cdc801fc4cab6224.cu
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/layers/loss/entrywise.hpp"
#include "lbann/utils/cuda.hpp"
namespace lbann {
namespace {
/** CUDA kernel to apply a binary backprop operator. */
template <typename BinaryBackPropOperator>
__global__
void binary_backprop_operator_kernel(El::Int height, El::Int width,
const DataType* __restrict__ x1,
El::Int x1_ldim,
const DataType* __restrict__ x2,
El::Int x2_ldim,
const DataType* __restrict__ dy,
El::Int dy_ldim,
DataType* __restrict__ dx1,
El::Int dx1_ldim,
DataType* __restrict__ dx2,
El::Int dx2_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int size = height * width;
const El::Int num_threads = blockDim.x * gridDim.x;
BinaryBackPropOperator op;
for (El::Int pos = gid; pos < size; pos += num_threads) {
const auto& row = pos % height;
const auto& col = pos / height;
op(x1[row + col * x1_ldim],
x2[row + col * x2_ldim],
dy[row + col * dy_ldim],
dx1[row + col * dx1_ldim],
dx2[row + col * dx2_ldim]);
}
}
/** Apply a binary backprop operator to GPU data.
 * The input and output data must be on GPU and must have the same
* dimensions. Given a binary function \f$ y = f(x_1,x_2) \f$, the
* corresponding BinaryBackPropOperator is a 5-ary function with the
* arguments \f$ x_1 \f$, \f$ x_2 \f$, \f$ dL/dy \f$, \f$ dL/dx_1\f$,
* \f$ dL/dx_2 \f$. The last two arguments should be overwritten when
* the BinaryBackPropOperator is called.
*/
template <typename BinaryBackPropOperator>
void apply_binary_backprop_operator(const AbsMat& x1,
const AbsMat& x2,
const AbsMat& dy,
AbsMat& dx1,
AbsMat& dx2) {
// Get CUDA grid dimensions
// Note: Maximum CUDA grid dimension is 2^32-1
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
const El::Int height = x1.Height();
const El::Int width = x1.Width();
const El::Int block_dim = 256;
El::Int grid_dim = (height * width + block_dim - 1) / block_dim;
if (sizeof(El::Int) > sizeof(unsigned int)
&& grid_dim > std::numeric_limits<uint32_t>::max()) {
grid_dim = std::numeric_limits<uint32_t>::max();
}
// Launch CUDA kernel
if (grid_dim > 0) {
CHECK_CUDA(cudaSetDevice(El::GPUManager::Device()));
binary_backprop_operator_kernel<BinaryBackPropOperator>
<<<grid_dim, block_dim, 0, El::GPUManager::Stream()>>>(
height, width,
x1.LockedBuffer(), x1.LDim(),
x2.LockedBuffer(), x2.LDim(),
dy.LockedBuffer(), dy.LDim(),
dx1.Buffer(), dx1.LDim(),
dx2.Buffer(), dx2.LDim());
}
}
// =========================================================
// Operator objects for entry-wise binary layers
// =========================================================
// Note: Binary operator corresponds to forward prop step
// (\f$ y = f(x_1,x_2) \f$) and 5-ary operator corresponds
// to back prop step
// (\f$ \frac{dL}{dx_i} = \frac{dL}{dy} \frac{df}{dx_i}(x_1,x_2) \f$).
/** Binary cross entropy operator. */
struct binary_cross_entropy_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
DataType y = zero;
if (x2 > zero) { y += -x2 * cuda::log(x1); }
if (x2 < one) { y += -(one-x2) * cuda::log(one-x1); }
return y;
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
dx1 = zero;
dx2 = zero;
if (dy == zero) { return; }
if (x2 > zero) {
dx1 += -x2 / x1 * dy;
dx2 += -cuda::log(x1) * dy;
}
if (x2 < one) {
dx1 += (one-x2) / (one-x1) * dy;
dx2 += cuda::log(one-x1) * dy;
}
}
};
/** Sigmoid binary cross entropy operator.
* Equivalent to applying a sigmoid function to the first operand and
* then computing the binary cross entropy. Numerically stable
* implementation is taken from
* https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits.
*/
struct sigmoid_binary_cross_entropy_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
const auto& z = cuda::max(zero, cuda::min(x2, one));
if (x1 > zero) {
return (one - z) * x1 + cuda::log1p(cuda::exp(-x1));
} else {
return - x1 * z + cuda::log1p(cuda::exp(x1));
}
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
constexpr DataType zero = 0;
constexpr DataType one = 1;
const auto& z = cuda::max(zero, cuda::min(x2, one));
if (x1 > zero) {
dx1 = -z + 1 / (one + cuda::exp(-x1));
} else {
dx1 = one - z - 1 / (one + cuda::exp(x1));
}
dx1 *= dy;
dx2 = (x2 == z) ? -x1 * dy : zero;
}
};
/** Boolean accuracy operator. */
struct boolean_accuracy_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
const auto& b1 = x1 >= DataType(0.5);
const auto& b2 = x2 >= DataType(0.5);
return b1 == b2 ? DataType(1) : DataType(0);
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
dx1 = DataType(0);
dx2 = DataType(0);
}
};
/** Boolean false negative operator. */
struct boolean_false_negative_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
const auto& b1 = x1 >= DataType(0.5);
const auto& b2 = x2 >= DataType(0.5);
return (!b1 && b2) ? DataType(1) : DataType(0);
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
dx1 = DataType(0);
dx2 = DataType(0);
}
};
/** Boolean false positive operator. */
struct boolean_false_positive_op {
inline __device__ DataType operator()(const DataType& x1,
const DataType& x2) const {
const auto& b1 = x1 >= DataType(0.5);
const auto& b2 = x2 >= DataType(0.5);
return (b1 && !b2) ? DataType(1) : DataType(0);
}
inline __device__ void operator()(const DataType& x1,
const DataType& x2,
const DataType& dy,
DataType& dx1,
DataType& dx2) const {
dx1 = DataType(0);
dx2 = DataType(0);
}
};
} // namespace
// Template instantiation
#define INSTANTIATE(layer, op) \
template <> \
void layer<data_layout::MODEL_PARALLEL, El::Device::GPU> \
::fp_compute() { \
cuda::apply_entrywise_binary_operator<op>(get_prev_activations(0), \
get_prev_activations(1), \
get_activations()); \
} \
template <> \
void layer<data_layout::MODEL_PARALLEL, El::Device::GPU> \
::bp_compute() { \
apply_binary_backprop_operator<op>(get_local_prev_activations(0), \
get_local_prev_activations(1), \
get_local_prev_error_signals(), \
get_local_error_signals(0), \
get_local_error_signals(1)); \
} \
template <> \
void layer<data_layout::DATA_PARALLEL, El::Device::GPU> \
::fp_compute() { \
cuda::apply_entrywise_binary_operator<op>(get_prev_activations(0), \
get_prev_activations(1), \
get_activations()); \
} \
template <> \
void layer<data_layout::DATA_PARALLEL, El::Device::GPU> \
::bp_compute() { \
apply_binary_backprop_operator<op>(get_local_prev_activations(0), \
get_local_prev_activations(1), \
get_local_prev_error_signals(), \
get_local_error_signals(0), \
get_local_error_signals(1)); \
}
INSTANTIATE(binary_cross_entropy_layer, binary_cross_entropy_op)
INSTANTIATE(sigmoid_binary_cross_entropy_layer, sigmoid_binary_cross_entropy_op)
INSTANTIATE(boolean_accuracy_layer, boolean_accuracy_op)
INSTANTIATE(boolean_false_negative_layer, boolean_false_negative_op)
INSTANTIATE(boolean_false_positive_layer, boolean_false_positive_op)
} // namespace lbann
|
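The comments above define the operator convention these layers rely on: a binary functor computes the forward value y = f(x1, x2), and a 5-ary functor overwrites dL/dx1 and dL/dx2 given x1, x2 and dL/dy. As an illustration of that convention only (this operator does not exist in LBANN, and DataType is assumed to be a floating-point alias as in the file above), an elementwise product would look like:
// Hypothetical example of the convention: y = x1 * x2,
// so dL/dx1 = x2 * dL/dy and dL/dx2 = x1 * dL/dy.
struct product_op {
  inline __device__ DataType operator()(const DataType& x1,
                                        const DataType& x2) const {
    return x1 * x2;  // forward prop
  }
  inline __device__ void operator()(const DataType& x1,
                                    const DataType& x2,
                                    const DataType& dy,
                                    DataType& dx1,
                                    DataType& dx2) const {
    dx1 = x2 * dy;  // gradient w.r.t. the first input
    dx2 = x1 * dy;  // gradient w.r.t. the second input
  }
};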
3331067c9886fed7dd9159ed022efa3e6e5e64e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/Tensor.cuh>
//
// This kernel is split into a separate compilation unit to cut down
// on compile time
//
namespace faiss { namespace gpu {
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ, bool Dir>
__global__ void
pass1SelectLists(void** listIndices,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<int, 2, true> topQueryToCentroid,
Tensor<uint8_t, 1, true> bitset,
Tensor<float, 1, true> distance,
int nprobe,
int k,
IndicesOptions opt,
Tensor<float, 3, true> heapDistances,
Tensor<int, 3, true> heapIndices) {
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
constexpr auto kInit = Dir ? kFloatMin : kFloatMax;
BlockSelect<float, int, Dir, Comparator<float>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(kInit, -1, smemK, smemV, k);
auto queryId = blockIdx.y;
auto sliceId = blockIdx.x;
auto numSlices = gridDim.x;
int sliceSize = (nprobe / numSlices);
int sliceStart = sliceSize * sliceId;
int sliceEnd = sliceId == (numSlices - 1) ? nprobe :
sliceStart + sliceSize;
auto offsets = prefixSumOffsets[queryId].data();
// We ensure that before the array (at offset -1), there is a 0 value
int start = *(&offsets[sliceStart] - 1);
int end = offsets[sliceEnd - 1];
int num = end - start;
int limit = utils::roundDown(num, kWarpSize);
int i = threadIdx.x;
auto distanceStart = distance[start].data();
bool bitsetEmpty = (bitset.getSize(0) == 0);
long index = -1;
// BlockSelect add cannot be used in a warp divergent circumstance; we
// handle the remainder warp below
for (; i < limit; i += blockDim.x) {
index = getListIndex(queryId,
start + i,
listIndices,
prefixSumOffsets,
topQueryToCentroid,
opt);
if (bitsetEmpty || (!(bitset[index >> 3] & (0x1 << (index & 0x7))))) {
heap.addThreadQ(distanceStart[i], start + i);
}
heap.checkThreadQ();
}
// Handle warp divergence separately
if (i < num) {
index = getListIndex(queryId,
start + i,
listIndices,
prefixSumOffsets,
topQueryToCentroid,
opt);
if (bitsetEmpty || (!(bitset[index >> 3] & (0x1 << (index & 0x7))))) {
heap.addThreadQ(distanceStart[i], start + i);
}
}
// Merge all final results
heap.reduce();
// Write out the final k-selected values; they should be all
// together
for (int i = threadIdx.x; i < k; i += blockDim.x) {
heapDistances[queryId][sliceId][i] = smemK[i];
heapIndices[queryId][sliceId][i] = smemV[i];
}
}
void
runPass1SelectLists(thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<int, 2, true>& topQueryToCentroid,
Tensor<uint8_t, 1, true>& bitset,
Tensor<float, 1, true>& distance,
int nprobe,
int k,
bool chooseLargest,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
hipStream_t stream) {
// This is caught at a higher level
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
auto grid = dim3(heapDistances.getSize(1), prefixSumOffsets.getSize(0));
#define RUN_PASS(BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR) \
do { \
hipLaunchKernelGGL(( pass1SelectLists<BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR>) \
, dim3(grid), dim3(BLOCK), 0, stream, listIndices.data().get(), \
prefixSumOffsets, \
topQueryToCentroid, \
bitset, \
distance, \
nprobe, \
k, \
indicesOptions, \
heapDistances, \
heapIndices); \
CUDA_TEST_ERROR(); \
return; /* success */ \
} while (0)
#if GPU_MAX_SELECTION_K >= 2048
// block size 128 for k <= 1024, 64 for k = 2048
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} else if (k <= 2048) { \
RUN_PASS(64, 2048, 8, DIR); \
} \
} while (0)
#else
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} \
} while (0)
#endif // GPU_MAX_SELECTION_K
if (chooseLargest) {
RUN_PASS_DIR(true);
} else {
RUN_PASS_DIR(false);
}
#undef RUN_PASS_DIR
#undef RUN_PASS
}
} } // namespace
|
3331067c9886fed7dd9159ed022efa3e6e5e64e4.cu
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/Tensor.cuh>
//
// This kernel is split into a separate compilation unit to cut down
// on compile time
//
namespace faiss { namespace gpu {
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ, bool Dir>
__global__ void
pass1SelectLists(void** listIndices,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<int, 2, true> topQueryToCentroid,
Tensor<uint8_t, 1, true> bitset,
Tensor<float, 1, true> distance,
int nprobe,
int k,
IndicesOptions opt,
Tensor<float, 3, true> heapDistances,
Tensor<int, 3, true> heapIndices) {
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
constexpr auto kInit = Dir ? kFloatMin : kFloatMax;
BlockSelect<float, int, Dir, Comparator<float>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(kInit, -1, smemK, smemV, k);
auto queryId = blockIdx.y;
auto sliceId = blockIdx.x;
auto numSlices = gridDim.x;
int sliceSize = (nprobe / numSlices);
int sliceStart = sliceSize * sliceId;
int sliceEnd = sliceId == (numSlices - 1) ? nprobe :
sliceStart + sliceSize;
auto offsets = prefixSumOffsets[queryId].data();
// We ensure that before the array (at offset -1), there is a 0 value
int start = *(&offsets[sliceStart] - 1);
int end = offsets[sliceEnd - 1];
int num = end - start;
int limit = utils::roundDown(num, kWarpSize);
int i = threadIdx.x;
auto distanceStart = distance[start].data();
bool bitsetEmpty = (bitset.getSize(0) == 0);
long index = -1;
// BlockSelect add cannot be used in a warp divergent circumstance; we
// handle the remainder warp below
for (; i < limit; i += blockDim.x) {
index = getListIndex(queryId,
start + i,
listIndices,
prefixSumOffsets,
topQueryToCentroid,
opt);
if (bitsetEmpty || (!(bitset[index >> 3] & (0x1 << (index & 0x7))))) {
heap.addThreadQ(distanceStart[i], start + i);
}
heap.checkThreadQ();
}
// Handle warp divergence separately
if (i < num) {
index = getListIndex(queryId,
start + i,
listIndices,
prefixSumOffsets,
topQueryToCentroid,
opt);
if (bitsetEmpty || (!(bitset[index >> 3] & (0x1 << (index & 0x7))))) {
heap.addThreadQ(distanceStart[i], start + i);
}
}
// Merge all final results
heap.reduce();
// Write out the final k-selected values; they should be all
// together
for (int i = threadIdx.x; i < k; i += blockDim.x) {
heapDistances[queryId][sliceId][i] = smemK[i];
heapIndices[queryId][sliceId][i] = smemV[i];
}
}
void
runPass1SelectLists(thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<int, 2, true>& topQueryToCentroid,
Tensor<uint8_t, 1, true>& bitset,
Tensor<float, 1, true>& distance,
int nprobe,
int k,
bool chooseLargest,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
cudaStream_t stream) {
// This is caught at a higher level
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
auto grid = dim3(heapDistances.getSize(1), prefixSumOffsets.getSize(0));
#define RUN_PASS(BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR) \
do { \
pass1SelectLists<BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR> \
<<<grid, BLOCK, 0, stream>>>(listIndices.data().get(), \
prefixSumOffsets, \
topQueryToCentroid, \
bitset, \
distance, \
nprobe, \
k, \
indicesOptions, \
heapDistances, \
heapIndices); \
CUDA_TEST_ERROR(); \
return; /* success */ \
} while (0)
#if GPU_MAX_SELECTION_K >= 2048
// block size 128 for k <= 1024, 64 for k = 2048
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} else if (k <= 2048) { \
RUN_PASS(64, 2048, 8, DIR); \
} \
} while (0)
#else
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} \
} while (0)
#endif // GPU_MAX_SELECTION_K
if (chooseLargest) {
RUN_PASS_DIR(true);
} else {
RUN_PASS_DIR(false);
}
#undef RUN_PASS_DIR
#undef RUN_PASS
}
} } // namespace
|
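Both versions above skip candidates whose ID is masked out in a byte-packed bitset, testing bit `index` with bitset[index >> 3] & (0x1 << (index & 0x7)). A small host-side sketch of that addressing scheme (helper names are illustrative, not part of FAISS):
#include <cstdint>
// True when bit `index` is set, using the same byte/bit addressing as the kernel above.
inline bool bit_is_set(const uint8_t* bitset, long index) {
    return (bitset[index >> 3] & (uint8_t)(0x1 << (index & 0x7))) != 0;
}
// Usage sketch: mark ID 10 as filtered out, then test two IDs.
//   uint8_t mask[2] = {0, 0};
//   mask[10 >> 3] |= (uint8_t)(0x1 << (10 & 0x7));  // set bit 10
//   bit_is_set(mask, 10);  // true  -> the kernel skips this candidate
//   bit_is_set(mask, 3);   // false -> the kernel keeps it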
0b3ce5004133bac57ca16ccf5dade8c5fb2da3e2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// srad kernel
__global__ void srad2(const fp d_lambda,
const int d_Nr,
const int d_Nc,
const long d_Ne,
const int *d_iN,
const int *d_iS,
const int *d_jE,
const int *d_jW,
const fp *d_dN,
const fp *d_dS,
const fp *d_dE,
const fp *d_dW,
const fp *d_c,
fp *d_I)
{
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_cN,d_cS,d_cW,d_cE;
fp d_D;
// figure out row/col location in new matrix
row = (ei+1) % d_Nr - 1; // (0-n) row
col = (ei+1) / d_Nr + 1 - 1; // (0-n) column
if((ei+1) % d_Nr == 0){
row = d_Nr - 1;
col = col - 1;
}
if(ei<d_Ne){ // make sure that only threads matching jobs run
// diffusion coefficient
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[d_iS[row] + d_Nr*col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * d_jE[col]]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN*d_dN[ei] + d_cS*d_dS[ei] + d_cW*d_dW[ei] + d_cE*d_dE[ei];// divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] = d_I[ei] + (fp)0.25*d_lambda*d_D;// updates image (based on input time step and divergence)
}
}
|
0b3ce5004133bac57ca16ccf5dade8c5fb2da3e2.cu
|
// srad kernel
__global__ void srad2(const fp d_lambda,
const int d_Nr,
const int d_Nc,
const long d_Ne,
const int *d_iN,
const int *d_iS,
const int *d_jE,
const int *d_jW,
const fp *d_dN,
const fp *d_dS,
const fp *d_dE,
const fp *d_dW,
const fp *d_c,
fp *d_I)
{
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_cN,d_cS,d_cW,d_cE;
fp d_D;
// figure out row/col location in new matrix
row = (ei+1) % d_Nr - 1; // (0-n) row
col = (ei+1) / d_Nr + 1 - 1; // (0-n) column
if((ei+1) % d_Nr == 0){
row = d_Nr - 1;
col = col - 1;
}
if(ei<d_Ne){ // make sure that only threads matching jobs run
// diffusion coefficient
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[d_iS[row] + d_Nr*col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * d_jE[col]]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN*d_dN[ei] + d_cS*d_dS[ei] + d_cW*d_dW[ei] + d_cE*d_dE[ei];// divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] = d_I[ei] + (fp)0.25*d_lambda*d_D;// updates image (based on input time step and divergence)
}
}
|
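The row/col arithmetic in srad2 above is an unrolled way of mapping the linear thread index ei onto a column-major Nr-by-Nc grid; for every non-negative ei it agrees with the plain modulo/division form. A short sketch of that equivalence (illustrative only):
// Recover (row, col) of element `ei` in a column-major Nr-by-Nc matrix.
void linear_to_rowcol(long ei, int Nr, int &row, int &col) {
    row = (int)(ei % Nr);  // same value the kernel gets from (ei+1) % Nr - 1 plus its fix-up branch
    col = (int)(ei / Nr);  // same value the kernel gets from (ei+1) / Nr minus its fix-up branch
}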
35d91a7f3f6b1cfd2d8b51a4fb7f5d88182848b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef IMP_CU_MEDIAN3X3_IMPL_CU
#define IMP_CU_MEDIAN3X3_IMPL_CU
#include <imp/cu_imgproc/cu_image_filter.cuh>
#include <cstdint>
#include <cfloat>
#include <hip/hip_runtime.h>
#include <imp/core/types.hpp>
#include <imp/core/pixel.hpp>
#include <imp/core/roi.hpp>
#include <imp/cu_core/cu_image_gpu.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <imp/cu_core/cu_texture.cuh>
namespace imp {
namespace cu {
//-----------------------------------------------------------------------------
template<typename Pixel>
__global__ void k_median3x3(Pixel* dst, const size_type stride,
const std::uint32_t xoff, const std::uint32_t yoff,
const std::uint32_t width, const std::uint32_t height,
Texture2D src_tex)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x>=0 && y>= 0 && x<width && y<height)
{
x += xoff;
y += yoff;
// shared mem coords
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
// we have a 3x3 kernel, so our width of the shared memory (shp) is blockDim.x + 2!
const int shp = blockDim.x + 2;
const int shc = ty*shp + tx;
extern __shared__ float sh_in[];
// Load input 3x3 block into shared memory
// Note: the FLT_MAX prevents us from overemphasizing the border pixels if they are outliers!
{
// for each thread: copy the data of the current input position to shared mem
Pixel texel;
src_tex.fetch(texel, x, y);
sh_in[shc] = texel;
/////////////////////////////////////////////////////////////////////////////
// boundary conditions
/////////////////////////////////////////////////////////////////////////////
if (x == 0) // at left image border
{
if (y == 0)
sh_in[shc-shp-1] = FLT_MAX; // left-upper corner (image)
else if (ty == 1)
{
// left-upper corner (block)
src_tex.fetch(texel, x, y-1.f);
sh_in[shc-shp-1] = texel;
}
sh_in[shc-1] = sh_in[shc]; // left border (image)
if (y == height-1)
sh_in[shc+shp-1] = FLT_MAX; // left-lower corner (image)
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x, y+1);
sh_in[shc+shp-1] = texel; // left-lower corner (block)
}
}
else if (tx == 1) // at left block border (inside image w.r.t x)
{
if (y == 0)
{
src_tex.fetch(texel, x-1, y);
sh_in[shc-shp-1] = texel; // left-upper corner (block, outside)
}
else if (ty == 1)
{
src_tex.fetch(texel, x-1, y-1);
sh_in[shc-shp-1] = texel; // left-upper corner (block, inside)
}
src_tex.fetch(texel, x-1, y);
sh_in[shc-1] = texel; // left border (block)
if (y == height-1)
{
src_tex.fetch(texel, x-1, y);
sh_in[shc+shp-1] = texel; // left-lower corner (block, outside)
}
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x-1, y+1);
sh_in[shc+shp-1] = texel; // left-lower corner (block, inside)
}
}
if (x == width-1) // at right image border
{
if (y == 0)
sh_in[shc-shp+1] = FLT_MAX; // right-upper corner (image)
else if (ty == 1)
{
src_tex.fetch(texel, x, y-1);
sh_in[shc-shp+1] = texel; // right-upper corner (block)
}
sh_in[shc+1] = sh_in[shc]; // right border (image)
if (y == height-1)
sh_in[shc+shp+1] = FLT_MAX; // right-lower corner (image)
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x, y+1);
sh_in[shc+shp+1] = texel; // right-lower corner (block)
}
}
else if (tx == blockDim.x) // at right block border (inside image w.r.t x)
{
if (y == 0)
{
src_tex.fetch(texel, x+1, y);
sh_in[shc-shp+1] = texel; // right-upper corner (block, outside)
}
else if (ty == 1)
{
src_tex.fetch(texel, x+1, y-1);
sh_in[shc-shp+1] = texel; // right-upper corner (block, inside)
}
src_tex.fetch(texel, x+1, y);
sh_in[shc+1] = texel; // right border (block)
if (y == height-1)
{
src_tex.fetch(texel, x+1, y);
sh_in[shc+shp+1] = texel; // right-lower corner (block, outside)
}
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x+1, y+1);
sh_in[shc+shp+1] = texel; // right-lower corner (block, inside)
}
}
if (y == 0)
sh_in[shc-shp] = sh_in[shc]; // upper border (image)
else if (ty == 1)
{
src_tex.fetch(texel, x, y-1);
sh_in[shc-shp] = texel; // upper border (block)
}
if (y == height-1)
sh_in[shc+shp] = sh_in[shc]; // lower border (image)
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x, y+1);
sh_in[shc+shp] = texel; // lower border (block)
}
__syncthreads();
}
// in a sequence of nine elements, we remove the running maximum four times; the maximum of the
// remaining five values is then the median
float maximum;
{
float vals[8];
// first 'loop'
vals[0] = fmin(sh_in[shc-shp-1], sh_in[shc-shp]);
maximum = fmax(sh_in[shc-shp-1], sh_in[shc-shp]);
vals[1] = fmin(maximum, sh_in[shc-shp+1]);
maximum = fmax(maximum, sh_in[shc-shp+1]);
vals[2] = fmin(maximum, sh_in[shc-1]);
maximum = fmax(maximum, sh_in[shc-1]);
vals[3] = fmin(maximum, sh_in[shc]);
maximum = fmax(maximum, sh_in[shc]);
vals[4] = fmin(maximum, sh_in[shc+1]);
maximum = fmax(maximum, sh_in[shc+1]);
vals[5] = fmin(maximum, sh_in[shc+shp-1]);
maximum = fmax(maximum, sh_in[shc+shp-1]);
vals[6] = fmin(maximum, sh_in[shc+shp]);
maximum = fmax(maximum, sh_in[shc+shp]);
vals[7] = fmin(maximum, sh_in[shc+shp+1]);
maximum = fmax(maximum, sh_in[shc+shp+1]);
// second 'loop'
maximum = fmax(vals[0], vals[1]);
vals[0] = fmin(vals[0], vals[1]);
vals[1] = maximum;
maximum = fmax(vals[1], vals[2]);
vals[1] = fmin(vals[1], vals[2]);
vals[2] = maximum;
maximum = fmax(vals[2], vals[3]);
vals[2] = fmin(vals[2], vals[3]);
vals[3] = maximum;
maximum = fmax(vals[3], vals[4]);
vals[3] = fmin(vals[3], vals[4]);
vals[4] = maximum;
maximum = fmax(vals[4], vals[5]);
vals[4] = fmin(vals[4], vals[5]);
vals[5] = maximum;
maximum = fmax(vals[5], vals[6]);
vals[5] = fmin(vals[5], vals[6]);
vals[6] = fmin(maximum, vals[7]);
// third 'loop'
maximum = fmax(vals[0], vals[1]);
vals[0] = fmin(vals[0], vals[1]);
vals[1] = maximum;
maximum = fmax(vals[1], vals[2]);
vals[1] = fmin(vals[1], vals[2]);
vals[2] = maximum;
maximum = fmax(vals[2], vals[3]);
vals[2] = fmin(vals[2], vals[3]);
vals[3] = maximum;
maximum = fmax(vals[3], vals[4]);
vals[3] = fmin(vals[3], vals[4]);
vals[4] = maximum;
maximum = fmax(vals[4], vals[5]);
vals[4] = fmin(vals[4], vals[5]);
vals[5] = fmin(maximum, vals[6]);
// 4th 'loop'
maximum = fmax(vals[0], vals[1]);
vals[0] = fmin(vals[0], vals[1]);
vals[1] = maximum;
maximum = fmax(vals[1], vals[2]);
vals[1] = fmin(vals[1], vals[2]);
vals[2] = maximum;
maximum = fmax(vals[2], vals[3]);
vals[2] = fmin(vals[2], vals[3]);
vals[3] = maximum;
maximum = fmax(vals[3], vals[4]);
vals[3] = fmin(vals[3], vals[4]);
vals[4] = fmin(maximum, vals[5]);
// 5th 'loop'
maximum = fmax(vals[0], vals[1]);
maximum = fmax(maximum, vals[2]);
maximum = fmax(maximum, vals[3]);
maximum = fmax(maximum, vals[4]);
}
dst[y*stride+x] = maximum;
}
}
//-----------------------------------------------------------------------------
template<typename Pixel, imp::PixelType pixel_type>
void filterMedian3x3(ImageGpu<Pixel, pixel_type>& dst,
const ImageGpu<Pixel, pixel_type>& src)
{
std::unique_ptr<Texture2D> src_tex =
src.genTexture(false, (src.bitDepth()<32) ? hipFilterModePoint
: hipFilterModeLinear);
constexpr std::uint16_t block_size = 16;
Fragmentation<block_size, block_size> frag(src.roi());
size_type shared_size = (block_size+2)*(block_size+2)*sizeof(float);
Roi2u roi = src.roi();
dst.setRoi(roi);
hipLaunchKernelGGL(k_median3x3,
                   dim3(frag.dimGrid), dim3(frag.dimBlock), shared_size, 0,
                   dst.data(), dst.stride(),
                   roi.x(), roi.y(), roi.width(), roi.height(), *src_tex);
IMP_CUDA_CHECK();
}
//==============================================================================
//
// template instantiations for all our image types
//
template void filterMedian3x3(ImageGpu8uC1& dst, const ImageGpu8uC1& src);
template void filterMedian3x3(ImageGpu8uC2& dst, const ImageGpu8uC2& src);
template void filterMedian3x3(ImageGpu8uC4& dst, const ImageGpu8uC4& src);
template void filterMedian3x3(ImageGpu16uC1& dst, const ImageGpu16uC1& src);
template void filterMedian3x3(ImageGpu16uC2& dst, const ImageGpu16uC2& src);
template void filterMedian3x3(ImageGpu16uC4& dst, const ImageGpu16uC4& src);
template void filterMedian3x3(ImageGpu32sC1& dst, const ImageGpu32sC1& src);
template void filterMedian3x3(ImageGpu32sC2& dst, const ImageGpu32sC2& src);
template void filterMedian3x3(ImageGpu32sC4& dst, const ImageGpu32sC4& src);
template void filterMedian3x3(ImageGpu32fC1& dst, const ImageGpu32fC1& src);
template void filterMedian3x3(ImageGpu32fC2& dst, const ImageGpu32fC2& src);
template void filterMedian3x3(ImageGpu32fC4& dst, const ImageGpu32fC4& src);
} // namespace cu
} // namespace imp
#endif // IMP_CU_MEDIAN3X3_IMPL_CU
|
35d91a7f3f6b1cfd2d8b51a4fb7f5d88182848b5.cu
|
#ifndef IMP_CU_MEDIAN3X3_IMPL_CU
#define IMP_CU_MEDIAN3X3_IMPL_CU
#include <imp/cu_imgproc/cu_image_filter.cuh>
#include <cstdint>
#include <cfloat>
#include <cuda_runtime.h>
#include <imp/core/types.hpp>
#include <imp/core/pixel.hpp>
#include <imp/core/roi.hpp>
#include <imp/cu_core/cu_image_gpu.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <imp/cu_core/cu_texture.cuh>
namespace imp {
namespace cu {
//-----------------------------------------------------------------------------
template<typename Pixel>
__global__ void k_median3x3(Pixel* dst, const size_type stride,
const std::uint32_t xoff, const std::uint32_t yoff,
const std::uint32_t width, const std::uint32_t height,
Texture2D src_tex)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x>=0 && y>= 0 && x<width && y<height)
{
x += xoff;
y += yoff;
// shared mem coords
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
// we have a 3x3 kernel, so our width of the shared memory (shp) is blockDim.x + 2!
const int shp = blockDim.x + 2;
const int shc = ty*shp + tx;
extern __shared__ float sh_in[];
// Load input 3x3 block into shared memory
// Note: the FLT_MAX prevents us from overemphasizing the border pixels if they are outliers!
{
// for each thread: copy the data of the current input position to shared mem
Pixel texel;
src_tex.fetch(texel, x, y);
sh_in[shc] = texel;
/////////////////////////////////////////////////////////////////////////////
// boundary conditions
/////////////////////////////////////////////////////////////////////////////
if (x == 0) // at left image border
{
if (y == 0)
sh_in[shc-shp-1] = FLT_MAX; // left-upper corner (image)
else if (ty == 1)
{
// left-upper corner (block)
src_tex.fetch(texel, x, y-1.f);
sh_in[shc-shp-1] = texel;
}
sh_in[shc-1] = sh_in[shc]; // left border (image)
if (y == height-1)
sh_in[shc+shp-1] = FLT_MAX; // left-lower corner (image)
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x, y+1);
sh_in[shc+shp-1] = texel; // left-lower corner (block)
}
}
else if (tx == 1) // at left block border (inside image w.r.t x)
{
if (y == 0)
{
src_tex.fetch(texel, x-1, y);
sh_in[shc-shp-1] = texel; // left-upper corner (block, outside)
}
else if (ty == 1)
{
src_tex.fetch(texel, x-1, y-1);
sh_in[shc-shp-1] = texel; // left-upper corner (block, inside)
}
src_tex.fetch(texel, x-1, y);
sh_in[shc-1] = texel; // left border (block)
if (y == height-1)
{
src_tex.fetch(texel, x-1, y);
sh_in[shc+shp-1] = texel; // left-lower corner (block, outside)
}
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x-1, y+1);
sh_in[shc+shp-1] = texel; // left-lower corner (block, inside)
}
}
if (x == width-1) // at right image border
{
if (y == 0)
sh_in[shc-shp+1] = FLT_MAX; // right-upper corner (image)
else if (ty == 1)
{
src_tex.fetch(texel, x, y-1);
sh_in[shc-shp+1] = texel; // right-upper corner (block)
}
sh_in[shc+1] = sh_in[shc]; // right border (image)
if (y == height-1)
sh_in[shc+shp+1] = FLT_MAX; // right-lower corner (image)
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x, y+1);
sh_in[shc+shp+1] = texel; // right-lower corner (block)
}
}
else if (tx == blockDim.x) // at right block border (inside image w.r.t x)
{
if (y == 0)
{
src_tex.fetch(texel, x+1, y);
sh_in[shc-shp+1] = texel; // right-upper corner (block, outside)
}
else if (ty == 1)
{
src_tex.fetch(texel, x+1, y-1);
sh_in[shc-shp+1] = texel; // right-upper corner (block, inside)
}
src_tex.fetch(texel, x+1, y);
sh_in[shc+1] = texel; // right border (block)
if (y == height-1)
{
src_tex.fetch(texel, x+1, y);
sh_in[shc+shp+1] = texel; // right-lower corner (block, outside)
}
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x+1, y+1);
sh_in[shc+shp+1] = texel; // right-lower corner (block, inside)
}
}
if (y == 0)
sh_in[shc-shp] = sh_in[shc]; // upper border (image)
else if (ty == 1)
{
src_tex.fetch(texel, x, y-1);
sh_in[shc-shp] = texel; // upper border (block)
}
if (y == height-1)
sh_in[shc+shp] = sh_in[shc]; // lower border (image)
else if (ty == blockDim.y)
{
src_tex.fetch(texel, x, y+1);
sh_in[shc+shp] = texel; // lower border (block)
}
__syncthreads();
}
// in a sequence of nine elements, we remove the running maximum four times; the maximum of the
// remaining five values is then the median
float maximum;
{
float vals[8];
// first 'loop'
vals[0] = fmin(sh_in[shc-shp-1], sh_in[shc-shp]);
maximum = fmax(sh_in[shc-shp-1], sh_in[shc-shp]);
vals[1] = fmin(maximum, sh_in[shc-shp+1]);
maximum = fmax(maximum, sh_in[shc-shp+1]);
vals[2] = fmin(maximum, sh_in[shc-1]);
maximum = fmax(maximum, sh_in[shc-1]);
vals[3] = fmin(maximum, sh_in[shc]);
maximum = fmax(maximum, sh_in[shc]);
vals[4] = fmin(maximum, sh_in[shc+1]);
maximum = fmax(maximum, sh_in[shc+1]);
vals[5] = fmin(maximum, sh_in[shc+shp-1]);
maximum = fmax(maximum, sh_in[shc+shp-1]);
vals[6] = fmin(maximum, sh_in[shc+shp]);
maximum = fmax(maximum, sh_in[shc+shp]);
vals[7] = fmin(maximum, sh_in[shc+shp+1]);
maximum = fmax(maximum, sh_in[shc+shp+1]);
// second 'loop'
maximum = fmax(vals[0], vals[1]);
vals[0] = fmin(vals[0], vals[1]);
vals[1] = maximum;
maximum = fmax(vals[1], vals[2]);
vals[1] = fmin(vals[1], vals[2]);
vals[2] = maximum;
maximum = fmax(vals[2], vals[3]);
vals[2] = fmin(vals[2], vals[3]);
vals[3] = maximum;
maximum = fmax(vals[3], vals[4]);
vals[3] = fmin(vals[3], vals[4]);
vals[4] = maximum;
maximum = fmax(vals[4], vals[5]);
vals[4] = fmin(vals[4], vals[5]);
vals[5] = maximum;
maximum = fmax(vals[5], vals[6]);
vals[5] = fmin(vals[5], vals[6]);
vals[6] = fmin(maximum, vals[7]);
// third 'loop'
maximum = fmax(vals[0], vals[1]);
vals[0] = fmin(vals[0], vals[1]);
vals[1] = maximum;
maximum = fmax(vals[1], vals[2]);
vals[1] = fmin(vals[1], vals[2]);
vals[2] = maximum;
maximum = fmax(vals[2], vals[3]);
vals[2] = fmin(vals[2], vals[3]);
vals[3] = maximum;
maximum = fmax(vals[3], vals[4]);
vals[3] = fmin(vals[3], vals[4]);
vals[4] = maximum;
maximum = fmax(vals[4], vals[5]);
vals[4] = fmin(vals[4], vals[5]);
vals[5] = fmin(maximum, vals[6]);
// 4th 'loop'
maximum = fmax(vals[0], vals[1]);
vals[0] = fmin(vals[0], vals[1]);
vals[1] = maximum;
maximum = fmax(vals[1], vals[2]);
vals[1] = fmin(vals[1], vals[2]);
vals[2] = maximum;
maximum = fmax(vals[2], vals[3]);
vals[2] = fmin(vals[2], vals[3]);
vals[3] = maximum;
maximum = fmax(vals[3], vals[4]);
vals[3] = fmin(vals[3], vals[4]);
vals[4] = fmin(maximum, vals[5]);
// 5th 'loop'
maximum = fmax(vals[0], vals[1]);
maximum = fmax(maximum, vals[2]);
maximum = fmax(maximum, vals[3]);
maximum = fmax(maximum, vals[4]);
}
dst[y*stride+x] = maximum;
}
}
//-----------------------------------------------------------------------------
template<typename Pixel, imp::PixelType pixel_type>
void filterMedian3x3(ImageGpu<Pixel, pixel_type>& dst,
const ImageGpu<Pixel, pixel_type>& src)
{
std::unique_ptr<Texture2D> src_tex =
src.genTexture(false, (src.bitDepth()<32) ? cudaFilterModePoint
: cudaFilterModeLinear);
constexpr std::uint16_t block_size = 16;
Fragmentation<block_size, block_size> frag(src.roi());
size_type shared_size = (block_size+2)*(block_size+2)*sizeof(float);
Roi2u roi = src.roi();
dst.setRoi(roi);
k_median3x3<<<frag.dimGrid, frag.dimBlock, shared_size>>>(
    dst.data(), dst.stride(),
    roi.x(), roi.y(), roi.width(), roi.height(), *src_tex);
IMP_CUDA_CHECK();
}
//==============================================================================
//
// template instantiations for all our image types
//
template void filterMedian3x3(ImageGpu8uC1& dst, const ImageGpu8uC1& src);
template void filterMedian3x3(ImageGpu8uC2& dst, const ImageGpu8uC2& src);
template void filterMedian3x3(ImageGpu8uC4& dst, const ImageGpu8uC4& src);
template void filterMedian3x3(ImageGpu16uC1& dst, const ImageGpu16uC1& src);
template void filterMedian3x3(ImageGpu16uC2& dst, const ImageGpu16uC2& src);
template void filterMedian3x3(ImageGpu16uC4& dst, const ImageGpu16uC4& src);
template void filterMedian3x3(ImageGpu32sC1& dst, const ImageGpu32sC1& src);
template void filterMedian3x3(ImageGpu32sC2& dst, const ImageGpu32sC2& src);
template void filterMedian3x3(ImageGpu32sC4& dst, const ImageGpu32sC4& src);
template void filterMedian3x3(ImageGpu32fC1& dst, const ImageGpu32fC1& src);
template void filterMedian3x3(ImageGpu32fC2& dst, const ImageGpu32fC2& src);
template void filterMedian3x3(ImageGpu32fC4& dst, const ImageGpu32fC4& src);
} // namespace cu
} // namespace imp
#endif // IMP_CU_MEDIAN3X3_IMPL_CU
|
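The selection network in k_median3x3 above drops the running maximum four times from the nine neighborhood values, so the maximum of the remaining five values is the fifth-largest element, i.e. the median. A compact host-side check of that claim using std::nth_element (illustrative, not part of the library):
#include <algorithm>
#include <array>
// Median of nine values; equivalent to the min/max selection network in k_median3x3.
float median9(std::array<float, 9> v) {
    std::nth_element(v.begin(), v.begin() + 4, v.end());
    return v[4];  // the fifth-smallest of nine values is the median
}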
b9e50328e4b38e4d6d2afbd51d7b94b145abc1c9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* cudalib.cpp
*
* Created on: May 17, 2018
* Author: root
*/
//#include <src/cudalib.h>
//#include <stdio.h>
//#include "hip/hip_runtime.h"
//#include "nccl.h"
//#include <string>
//#include "src/cudalib.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include "nccl.h"
#include "file_input.h"
#include <src/cudalib.h>
#include <hip/hip_fp16.h>
#include <string>
using namespace std;
// count the maximum ancestor count, the maximum ancestor length, and the maximum single-record length
template <class T>
__host__ __device__ void len(const char*info,T*result)
{
int i=0;//index
char frist_mark=(byte)0;
while(*(info+i)!='@'){
if((*(info+i)=='.' or *(info+i)=='_')){
result[0]=(T)((int)result[0]+(int)1);
}
if(frist_mark==(byte)1)//if frist '.'
{result[1]=(T)((int)result[1]+(int)1);}
if(frist_mark==(byte)0 and *(info+i)=='.')//if frist '.'
{ frist_mark=(byte)1;
}
i=i-1;
}
result[2]=(T)(abs(i)-2);
}
template <class T>
__global__ void split_global(T* dum, char* info,long start,long length,int dimblock)
{ extern __shared__ byte s[];
if (threadIdx.x==0){
memset(s,(byte)0,3*dimblock*sizeof(T));
}
__syncthreads();
// T* temp=(T*)malloc(2*sizeof(T));
T temp[3];
long length_N = length;
int step = gridDim.x*blockDim.x;
const long start_P=start;//
long start_N =threadIdx.x+blockIdx.x*blockDim.x;
for(long start=start_N;start<length_N;start=start+step)
{
if((char)*(info+start+start_P)=='\n')
{ temp[0]=0;
temp[1]=0;
temp[2]=0;
len(info+start+start_P,temp);
if((int)temp[0]>(int)s[threadIdx.x*3]){
s[threadIdx.x*3]=temp[0];
}
if((int)temp[1]>(int)s[threadIdx.x*3+1]){
s[threadIdx.x*3+1]=temp[1];
}
if((int)temp[2]>(int)s[threadIdx.x*3+2]){
s[threadIdx.x*3+2]=temp[2];
}
}
}
// temp is a fixed-size local array (T temp[3]), so no free() is needed here
//
__syncthreads();
if(threadIdx.x==0)
memcpy(dum+3*blockIdx.x*blockDim.x*sizeof(T),s,3*blockDim.x*sizeof(T));
__syncthreads();
}
// extract all ancestors, for insertion into the hash table
//dimGrid_N, dimBlock_N,0,s[i]>>>(d_result[i]+h_len_result[deviceCount-2]*max_an_len*max_an_num,d_info[i],max_an_len,max_an_num,(deviceCount-1)*sub_length,sub_length+yu,dimBlock_N
template <class T>
__global__ void scut2ancestors(char* des,int max_an_len,int max_an_num,char* info,long start,long length,unsigned long long int* mark,int dimblock)
{ // allocate per-thread space to record the positions of '_' and '.' in the record currently being split
extern __shared__ byte s[];
long length_N = length;
int step = gridDim.x*blockDim.x;
const long start_P=start;//
long start_N =threadIdx.x+blockIdx.x*blockDim.x;
if(threadIdx.x==0){
*mark=0;
memset(s,(byte)0,max_an_num*dimblock*sizeof(T));
}
__syncthreads();
for(long start=start_N;start<length_N;start=start+step)
{
if((char)*(info+start+start_P)=='\n')
{ // all relative positions are measured from each '\n' newline character
//1-----------------------------------------------------
int dian_i=0;// root position of the largest ancestor, just before the first '.'
int last_i=0;// position of the leading '@'
int first_dian_mark=0;// whether the first '.' has been seen
int i=0;
while(*(info+start+start_P+i)!='@')
{if(*(info+start+start_P+i)=='.' and first_dian_mark==0)
{dian_i=i;
first_dian_mark=1;
}
i=i-1;
}
last_i=i;
// printf("a:=%c \n",*(info+start+start_P+i));
dian_i=dian_i-1;// start of the largest ancestor: info+start+start_P+dian_i
last_i=last_i+1;// end of the largest ancestor: info+start+start_P+last_i
//----------------------------------------------------
// //2. record the positions of each ancestor level ---- '.' and '_'
int last_i_N=last_i;// keep a record of the ancestor start position
int dian_i_N=dian_i+1;// position of the '.'
int an_num=0;// number of ancestors
while(last_i<=dian_i_N and an_num<max_an_num){
if(*(info+start+start_P+last_i)=='.' or *(info+start+start_P+last_i)=='_'){
s[threadIdx.x*max_an_num+an_num]=last_i;
// printf("*(info+start+start_P+s[w]):%c,theid:=%d \n",*(info+start+start_P+s[threadIdx.x*max_an_num+an_num]),threadIdx.x*max_an_num+an_num);
an_num=an_num+1;
}
last_i=last_i+1;
}
// //3. output each ancestor node in turn
an_num=0;
unsigned long long int position=0;
//(*(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='.' or *(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='_') and
while((*(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='.' or *(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='_') and an_num<max_an_num){
position=(unsigned long long int )atomicAdd((unsigned long long int *)mark,(unsigned long long int )1);
memcpy(des+position*max_an_len,info+start+start_P+last_i_N,s[threadIdx.x*max_an_num+an_num]-last_i_N);
*(des+position*max_an_len+s[threadIdx.x*max_an_num+an_num]-last_i_N)='\0';
// if(*(des+position*max_an_len+s[threadIdx.x*max_an_num+an_num]-last_i_N-1)=='.')
// {char* temp=(char *)malloc(-last_i_N);
// memcpy(temp,info+start+start_P+last_i_N,-last_i_N);
// *(temp-last_i_N)='\0';
// printf("n:=%s||%s||%c,%c,%d,%d \n",des+position*max_an_len,temp,*(info+start+start_P+dian_i),*(info+start+start_P+last_i_N),(int)s[threadIdx.x*max_an_num+an_num],an_num);
// }
an_num=an_num+1;
}
//
position=(unsigned long long int )atomicAdd((unsigned long long int *)mark,(unsigned long long int )1);
memcpy(des+position*max_an_len,info+start+start_P+last_i_N,-last_i_N+1);
*(des+position*max_an_len-last_i_N)='\0';
// if(*(des+position*max_an_len)=='\0')
// {char* temp=(char *)malloc(-last_i_N+1);
// memcpy(temp,info+start+start_P+last_i_N,-last_i_N);
// *(temp-last_i_N)='\0';
// printf("ok::%s||%s||%c,%c,%d,%d \n",des+position*max_an_len,temp,*(info+start+start_P+dian_i),*(info+start+start_P+last_i_N),(int)s[threadIdx.x*max_an_num+an_num],an_num);
// delete temp;
// }
// printf("ok:=%s \n",des+position*max_an_len);
__syncthreads();
if(threadIdx.x==0){memset(s,(byte)0,max_an_num*dimblock*sizeof(T));}
__syncthreads();
}
}
}
template __host__ __device__ void len<ubyte>(const char*,ubyte *);
template __host__ __device__ void len<byte>(const char*,byte *);
template __host__ __device__ void len<int>(const char*,int *);
template __global__ void split_global<ubyte>(ubyte*, char*,long,long,int);
template __global__ void split_global<byte>(byte*, char*,long,long,int);
template __global__ void split_global<int>(int*, char*,long,long,int);
template __global__ void scut2ancestors<byte>(char*,int ,int ,char*,long,long,unsigned long long int *,int);
template __global__ void scut2ancestors<ubyte>(char*,int ,int ,char*,long,long,unsigned long long int *,int);
template __global__ void scut2ancestors<int>(char*,int ,int ,char*,long,long,unsigned long long int *,int);
|
b9e50328e4b38e4d6d2afbd51d7b94b145abc1c9.cu
|
/*
* cudalib.cpp
*
* Created on: May 17, 2018
* Author: root
*/
//#include <src/cudalib.h>
//#include <stdio.h>
//#include "cuda_runtime.h"
//#include "nccl.h"
//#include <string>
//#include "src/cudalib.h"
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include "nccl.h"
#include "file_input.h"
#include <src/cudalib.h>
#include <cuda_fp16.h>
#include <string>
using namespace std;
// count the maximum ancestor count, the maximum ancestor length, and the maximum single-record length
template <class T>
__host__ __device__ void len(const char*info,T*result)
{
int i=0;//index
char frist_mark=(byte)0;
while(*(info+i)!='@'){
if((*(info+i)=='.' or *(info+i)=='_')){
result[0]=(T)((int)result[0]+(int)1);
}
  if(frist_mark==(byte)1)//if first '.' already seen
   {result[1]=(T)((int)result[1]+(int)1);}
  if(frist_mark==(byte)0 and *(info+i)=='.')//if first '.'
{ frist_mark=(byte)1;
}
i=i-1;
}
result[2]=(T)(abs(i)-2);
}
template <class T>
__global__ void split_global(T* dum, char* info,long start,long length,int dimblock)
{ extern __shared__ byte s[];
if (threadIdx.x==0){
memset(s,(byte)0,3*dimblock*sizeof(T));
}
__syncthreads();
// T* temp=(T*)malloc(2*sizeof(T));
T temp[3];
long length_N = length;
int step = gridDim.x*blockDim.x;
 const long start_P=start;//starting position
long start_N =threadIdx.x+blockIdx.x*blockDim.x;
for(long start=start_N;start<length_N;start=start+step)
{
if((char)*(info+start+start_P)=='\n')
{ temp[0]=0;
temp[1]=0;
temp[2]=0;
len(info+start+start_P,temp);
if((int)temp[0]>(int)s[threadIdx.x*3]){
s[threadIdx.x*3]=temp[0];
}
if((int)temp[1]>(int)s[threadIdx.x*3+1]){
s[threadIdx.x*3+1]=temp[1];
}
if((int)temp[2]>(int)s[threadIdx.x*3+2]){
s[threadIdx.x*3+2]=temp[2];
}
}
}
  //free(temp); // temp is a fixed-size local array here (see above), so it must not be freed
 //synchronize
__syncthreads();
if(threadIdx.x==0)
memcpy(dum+3*blockIdx.x*blockDim.x*sizeof(T),s,3*blockDim.x*sizeof(T));
__syncthreads();
}
//Cut out all ancestors, to be inserted into the hash table
//dimGrid_N, dimBlock_N,0,s[i]>>>(d_result[i]+h_len_result[deviceCount-2]*max_an_len*max_an_num,d_info[i],max_an_len,max_an_num,(deviceCount-1)*sub_length,sub_length+yu,dimBlock_N
template <class T>
__global__ void scut2ancestors(char* des,int max_an_len,int max_an_num,char* info,long start,long length,unsigned long long int* mark,int dimblock)
{ //allocate space for each thread to record the positions of the '_' and '.' separators in the record currently being split
extern __shared__ byte s[];
long length_N = length;
int step = gridDim.x*blockDim.x;
 const long start_P=start;//starting position
long start_N =threadIdx.x+blockIdx.x*blockDim.x;
if(threadIdx.x==0){
*mark=0;
memset(s,(byte)0,max_an_num*dimblock*sizeof(T));
}
__syncthreads();
for(long start=start_N;start<length_N;start=start+step)
{
if((char)*(info+start+start_P)=='\n')
    { //all relative positions are measured relative to each '\n' newline character
     //1. -----------------------------------------------------
      int dian_i=0;//root position of the largest ancestor, before the first '.'
      int last_i=0;//the starting '@'
      int first_dian_mark=0;//whether it is the first '.'
int i=0;
while(*(info+start+start_P+i)!='@')
{if(*(info+start+start_P+i)=='.' and first_dian_mark==0)
{dian_i=i;
first_dian_mark=1;
}
i=i-1;
}
last_i=i;
// printf("a:=%c \n",*(info+start+start_P+i));
      dian_i=dian_i-1;//start position of the largest ancestor: info+start+start_P+dian_i
      last_i=last_i+1;//end position of the largest ancestor: info+start+start_P+last_i
      //----------------------------------------------------
      // //2. Record the positions of the ancestor nodes at each level ---- the '.' and '_' marks
      int last_i_N=last_i;//keep a record of the ancestor start position
      int dian_i_N=dian_i+1;//position of the '.'
      int an_num=0;//number of ancestors
while(last_i<=dian_i_N and an_num<max_an_num){
if(*(info+start+start_P+last_i)=='.' or *(info+start+start_P+last_i)=='_'){
s[threadIdx.x*max_an_num+an_num]=last_i;
// printf("*(info+start+start_P+s[w]):%c,theid:=%d \n",*(info+start+start_P+s[threadIdx.x*max_an_num+an_num]),threadIdx.x*max_an_num+an_num);
an_num=an_num+1;
}
last_i=last_i+1;
}
      // //3. Output each ancestor node in turn
an_num=0;
unsigned long long int position=0;
//(*(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='.' or *(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='_') and
while((*(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='.' or *(info+start+start_P+s[threadIdx.x*max_an_num+an_num])=='_') and an_num<max_an_num){
position=(unsigned long long int )atomicAdd((unsigned long long int *)mark,(unsigned long long int )1);
memcpy(des+position*max_an_len,info+start+start_P+last_i_N,s[threadIdx.x*max_an_num+an_num]-last_i_N);
*(des+position*max_an_len+s[threadIdx.x*max_an_num+an_num]-last_i_N)='\0';
// if(*(des+position*max_an_len+s[threadIdx.x*max_an_num+an_num]-last_i_N-1)=='.')
// {char* temp=(char *)malloc(-last_i_N);
// memcpy(temp,info+start+start_P+last_i_N,-last_i_N);
// *(temp-last_i_N)='\0';
// printf("n:=%s||%s||%c,%c,%d,%d \n",des+position*max_an_len,temp,*(info+start+start_P+dian_i),*(info+start+start_P+last_i_N),(int)s[threadIdx.x*max_an_num+an_num],an_num);
// }
an_num=an_num+1;
}
      //insert the whole record
position=(unsigned long long int )atomicAdd((unsigned long long int *)mark,(unsigned long long int )1);
memcpy(des+position*max_an_len,info+start+start_P+last_i_N,-last_i_N+1);
*(des+position*max_an_len-last_i_N)='\0';
// if(*(des+position*max_an_len)=='\0')
// {char* temp=(char *)malloc(-last_i_N+1);
// memcpy(temp,info+start+start_P+last_i_N,-last_i_N);
// *(temp-last_i_N)='\0';
// printf("ok::%s||%s||%c,%c,%d,%d \n",des+position*max_an_len,temp,*(info+start+start_P+dian_i),*(info+start+start_P+last_i_N),(int)s[threadIdx.x*max_an_num+an_num],an_num);
// delete temp;
// }
// printf("ok:=%s \n",des+position*max_an_len);
__syncthreads();
if(threadIdx.x==0){memset(s,(byte)0,max_an_num*dimblock*sizeof(T));}
__syncthreads();
}
}
}
template __host__ __device__ void len<ubyte>(const char*,ubyte *);
template __host__ __device__ void len<byte>(const char*,byte *);
template __host__ __device__ void len<int>(const char*,int *);
template __global__ void split_global<ubyte>(ubyte*, char*,long,long,int);
template __global__ void split_global<byte>(byte*, char*,long,long,int);
template __global__ void split_global<int>(int*, char*,long,long,int);
template __global__ void scut2ancestors<byte>(char*,int ,int ,char*,long,long,unsigned long long int *,int);
template __global__ void scut2ancestors<ubyte>(char*,int ,int ,char*,long,long,unsigned long long int *,int);
template __global__ void scut2ancestors<int>(char*,int ,int ,char*,long,long,unsigned long long int *,int);
|
a37a0838ad92f7bea515052e712a6852ef297173.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <hip/hip_fp16.h>
namespace chainer_trt {
namespace plugin {
template <typename T>
__global__ void broadcast_to_kernel(const T *d_src, T *d_dst,
int *d_i_strides, int *d_o_strides,
int in_size, int out_size,
int nb_dims) {
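            // Launched by apply_broadcast_to below with a (ceil(out_size/thread_size), batch_size) grid:
            // blockIdx.y selects the batch item, so each batch slice of in_size/out_size elements is
            // broadcast independently.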
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < out_size) {
// calc offset relationship between input & output
int in_idx = 0;
int f = idx;
for(int i = 0; i < nb_dims; i++) {
in_idx += (f / d_o_strides[i]) * d_i_strides[i];
f = f % d_o_strides[i];
}
d_dst[blockIdx.y * out_size + idx] =
d_src[blockIdx.y * in_size + in_idx];
}
}
template <typename T>
void apply_broadcast_to(const T *d_src, T *d_dst, int *d_i_strides,
int *d_o_strides, int in_size, int out_size,
int nb_dims, int batch_size, hipStream_t stream) {
const int thread_size = 1024;
const int block_size = (int)::ceil(1.0 * out_size / thread_size);
dim3 grid(block_size, batch_size);
hipLaunchKernelGGL(( broadcast_to_kernel), dim3(grid), dim3(thread_size), 0, stream,
d_src, d_dst, d_i_strides, d_o_strides, in_size, out_size, nb_dims);
}
template void apply_broadcast_to(const float *, float *, int *, int *, int,
int, int, int, hipStream_t);
template void apply_broadcast_to(const __half *, __half *, int *, int *,
int, int, int, int, hipStream_t);
}
}
|
a37a0838ad92f7bea515052e712a6852ef297173.cu
|
/*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
namespace chainer_trt {
namespace plugin {
template <typename T>
__global__ void broadcast_to_kernel(const T *d_src, T *d_dst,
int *d_i_strides, int *d_o_strides,
int in_size, int out_size,
int nb_dims) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < out_size) {
// calc offset relationship between input & output
int in_idx = 0;
int f = idx;
for(int i = 0; i < nb_dims; i++) {
in_idx += (f / d_o_strides[i]) * d_i_strides[i];
f = f % d_o_strides[i];
}
d_dst[blockIdx.y * out_size + idx] =
d_src[blockIdx.y * in_size + in_idx];
}
}
template <typename T>
void apply_broadcast_to(const T *d_src, T *d_dst, int *d_i_strides,
int *d_o_strides, int in_size, int out_size,
int nb_dims, int batch_size, cudaStream_t stream) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * out_size / thread_size);
dim3 grid(block_size, batch_size);
broadcast_to_kernel<<<grid, thread_size, 0, stream>>>(
d_src, d_dst, d_i_strides, d_o_strides, in_size, out_size, nb_dims);
}
template void apply_broadcast_to(const float *, float *, int *, int *, int,
int, int, int, cudaStream_t);
template void apply_broadcast_to(const __half *, __half *, int *, int *,
int, int, int, int, cudaStream_t);
}
}
|
a897ba5b2dbe3c2fb45ec7f23acee5d1306acad9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
int __host__ cu2_sq_func(int x)
{
hipError_t err;
int nDevices = 0;
err = hipGetDeviceCount(&nDevices);
if (err != hipSuccess) {
std::cerr << "nDevices: " << nDevices << std::endl;
std::cerr << "err: " << err << std::endl;
return 1;
}
return x * x;
}
|
a897ba5b2dbe3c2fb45ec7f23acee5d1306acad9.cu
|
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
int __host__ cu2_sq_func(int x)
{
cudaError_t err;
int nDevices = 0;
err = cudaGetDeviceCount(&nDevices);
if (err != cudaSuccess) {
std::cerr << "nDevices: " << nDevices << std::endl;
std::cerr << "err: " << err << std::endl;
return 1;
}
return x * x;
}
|
67bd3b276b5a6d11066389c13a73c369c02a170f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
 * copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void gpu_max_d(float *D, float *PD, float *map, int size, int reduction_size, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int offset = thread_id_x*reduction_size;
// Choose if we're getting the maximum value or the position
if(argmax) {
int argmax_addr = map[thread_id_x];
PD[offset+argmax_addr] += D[thread_id_x];
}else{
PD[offset+thread_id_x] += D[thread_id_x];;
}
}
}
__global__ void gpu_max(float *A, float *B, int *map, int size, int size_reduction, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_max = A[*base_addr];
int tmp_argmax = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val > tmp_max){
tmp_max = val;
tmp_argmax = i;
}
}
// Choose if we're getting the maximum value or the position
if(argmax) {
B[thread_id_x] = (float)tmp_argmax;
}else{
B[thread_id_x] = tmp_max;
}
}
}
__global__ void gpu_min(float *A, float *B, int *map, int size, int size_reduction, bool argmin){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_min = A[*base_addr];
int tmp_argmin = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val < tmp_min){
tmp_min = val;
tmp_argmin = i;
}
}
// Choose if we're getting the minimum value or the position
if(argmin) {
B[thread_id_x] = (float)tmp_argmin;
}else{
B[thread_id_x] = tmp_min;
}
}
}
__global__ void gpu_sum(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_sum_abs(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += abs(A[*(base_addr+i)]);
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_prod(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 1.0f;
for(int i=0; i<size_reduction; i++){
tmp *= A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_mean(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp/(float)size_reduction;
}
}
__global__ void gpu_median(float *A, float *B, int *map, int size, int size_reduction, float *aux){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
// Copy values
long int offset = thread_id_x*size_reduction;
for(int i=0; i<size_reduction; i++){
aux[offset+i] = A[map[offset+i]];
}
// Sort data
thrust::sort(thrust::device, aux + offset, aux + offset + size_reduction);
// Get median
int midpoint = (int)offset + size_reduction/ 2;
if(size_reduction % 2==1 && size_reduction>1) {
B[thread_id_x] = aux[midpoint];
}else{
B[thread_id_x] = (aux[midpoint-1]+aux[midpoint])/2.0f;
}
}
}
__global__ void gpu_var(float *A, float *B, int *map, int size, int size_reduction, bool unbiased){
// IMPORTANT TRICK: B ALREADY CONTAINS THE MEAN!!!!!!!
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp;
float sum = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp = A[*(base_addr+i)] - B[thread_id_x];
sum += tmp*tmp;
}
if(unbiased){
B[thread_id_x] = sum/((float)size_reduction-1.0f);
} else {
B[thread_id_x] = sum/(float)size_reduction;
}
}
}
__global__ void gpu_norm_fro(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
float val;
for(int i=0; i<size_reduction; i++){
val = A[*(base_addr+i)];
tmp += val*val;
}
B[thread_id_x] = sqrt(tmp);
}
}
__global__ void gpu_mode(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
// Copy values
int *values = new int[size_reduction]; // Dynamic allocation is not the best approach
for(int i=0; i<size_reduction; i++){
values[i] = (int)A[*(base_addr+i)];
}
// Sort data
thrust::sort(thrust::seq, values, values + size_reduction);
// Get most frequent element
int most_frequent_val;
int most_frequent_times = 0;
int val = values[0];
int frequency = 1;
for(int i=1; i<size_reduction; i++){
            // Check if the value has changed
if(val==values[i]){
frequency++;
}else{
val = values[i];
frequency = 1;
}
// Check frequency
if(frequency>most_frequent_times){
most_frequent_val = val;
most_frequent_times = frequency;
}
}
// Assign most frequent value
B[thread_id_x] = (float)most_frequent_val;
// Delete temp array
delete[] values;
}
}
/* PREVIOUS REDUCES ***********************************/
__global__ void reduce_mean(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
atomicAdd(&(B[map[thread_id_x]]),A[thread_id_x]);
}
}
__global__ void reduce_op_sum(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]+=B[map[thread_id_x]];
}
}
__global__ void reduce_op_diff(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]-=B[map[thread_id_x]];
}
}
__global__ void reduce_op_mult(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]*=B[map[thread_id_x]];
}
}
__global__ void reduce_op_div(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]/=B[map[thread_id_x]];
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
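// m selects the reduction computed below: 0 = mean, 1 = sum, 2 = max, 3 = min.
// For m >= 2 the index of the winning element is also stored in S so that
// reduction_back_kernel can scatter the incoming values back to those positions.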
__global__ void reduction_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float sum=0;
float v,val;
int i;
int p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++) {
v=I[ind[p]];
if (m==2) {
if (j==0) {val=v;i=p;}
else if (v>val) {
val=v;
i=p;
}
}
else if (m==3) {
if (j==0) {val=v;i=p;}
else if (v<val) {
val=v;
i=p;
}
}
else sum+=v;
}
p=rs*blockIdx.x;
// set in Output
if (m<2) { // mean or sum
if (m==0) sum/=d;
if (keepdims) {
for(j=0;j<rs;j++,p++)
O[ind[p]]=sum;
}
else O[thread_id_x]=sum;
}
  else { // max or min
if (keepdims) {
for(j=0;j<rs;j++,p++) {
O[ind[p]]=val;
S[ind[p]]=i;
}
}
else {
O[thread_id_x]=val;
S[thread_id_x]=i;
}
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_back_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float val=0;
int p;
// set in Delta
if (m>=2) {
int p=S[thread_id_x];
O[p]+=I[thread_id_x];
}
else {
p=rs*blockIdx.x;
if(keepdims) {
for(j=0;j<rs;j++,p++)
val+=I[ind[p]];
}
else val=I[thread_id_x];
if (m==0) val/=d;
p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++)
O[ind[p]]+=val;
}
}
////////////////////
// FOR SUM and MEAN
// Faster in Conv
///////////////////
//dim3 dimGrid(red_size);
//dim3 dimBlock(RD->index.size());
__global__ void reduction_permute(float *I,float *O,int *ind,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size)
O[thread_id_x]=I[ind[thread_id_x]];
}
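// reduction_kernel_keep broadcasts each reduced value red[g] back to every output position
// ind[...] of group g (rsize consecutive entries of ind share one reduced value);
// the _inc variant below accumulates instead of overwriting.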
__global__ void reduction_kernel_keep(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]=red[thread_id_x/rsize];
}
}
__global__ void reduction_kernel_keep_inc(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]+=red[thread_id_x/rsize];
}
}
|
67bd3b276b5a6d11066389c13a73c369c02a170f.cu
|
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
* copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void gpu_max_d(float *D, float *PD, float *map, int size, int reduction_size, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int offset = thread_id_x*reduction_size;
// Choose if we're getting the maximum value or the position
if(argmax) {
int argmax_addr = map[thread_id_x];
PD[offset+argmax_addr] += D[thread_id_x];
}else{
PD[offset+thread_id_x] += D[thread_id_x];;
}
}
}
__global__ void gpu_max(float *A, float *B, int *map, int size, int size_reduction, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_max = A[*base_addr];
int tmp_argmax = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val > tmp_max){
tmp_max = val;
tmp_argmax = i;
}
}
// Choose if we're getting the maximum value or the position
if(argmax) {
B[thread_id_x] = (float)tmp_argmax;
}else{
B[thread_id_x] = tmp_max;
}
}
}
__global__ void gpu_min(float *A, float *B, int *map, int size, int size_reduction, bool argmin){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_min = A[*base_addr];
int tmp_argmin = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val < tmp_min){
tmp_min = val;
tmp_argmin = i;
}
}
// Choose if we're getting the minimum value or the position
if(argmin) {
B[thread_id_x] = (float)tmp_argmin;
}else{
B[thread_id_x] = tmp_min;
}
}
}
__global__ void gpu_sum(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_sum_abs(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += abs(A[*(base_addr+i)]);
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_prod(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 1.0f;
for(int i=0; i<size_reduction; i++){
tmp *= A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_mean(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp/(float)size_reduction;
}
}
__global__ void gpu_median(float *A, float *B, int *map, int size, int size_reduction, float *aux){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
// Copy values
long int offset = thread_id_x*size_reduction;
for(int i=0; i<size_reduction; i++){
aux[offset+i] = A[map[offset+i]];
}
// Sort data
thrust::sort(thrust::device, aux + offset, aux + offset + size_reduction);
// Get median
int midpoint = (int)offset + size_reduction/ 2;
if(size_reduction % 2==1 && size_reduction>1) {
B[thread_id_x] = aux[midpoint];
}else{
B[thread_id_x] = (aux[midpoint-1]+aux[midpoint])/2.0f;
}
}
}
__global__ void gpu_var(float *A, float *B, int *map, int size, int size_reduction, bool unbiased){
// IMPORTANT TRICK: B ALREADY CONTAINS THE MEAN!!!!!!!
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp;
float sum = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp = A[*(base_addr+i)] - B[thread_id_x];
sum += tmp*tmp;
}
if(unbiased){
B[thread_id_x] = sum/((float)size_reduction-1.0f);
} else {
B[thread_id_x] = sum/(float)size_reduction;
}
}
}
__global__ void gpu_norm_fro(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
float val;
for(int i=0; i<size_reduction; i++){
val = A[*(base_addr+i)];
tmp += val*val;
}
B[thread_id_x] = sqrt(tmp);
}
}
__global__ void gpu_mode(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
// Copy values
int *values = new int[size_reduction]; // Dynamic allocation is not the best approach
for(int i=0; i<size_reduction; i++){
values[i] = (int)A[*(base_addr+i)];
}
// Sort data
thrust::sort(thrust::seq, values, values + size_reduction);
// Get most frequent element
int most_frequent_val;
int most_frequent_times = 0;
int val = values[0];
int frequency = 1;
for(int i=1; i<size_reduction; i++){
            // Check if the value has changed
if(val==values[i]){
frequency++;
}else{
val = values[i];
frequency = 1;
}
// Check frequency
if(frequency>most_frequent_times){
most_frequent_val = val;
most_frequent_times = frequency;
}
}
// Assign most frequent value
B[thread_id_x] = (float)most_frequent_val;
// Delete temp array
delete[] values;
}
}
/* PREVIOUS REDUCES ***********************************/
__global__ void reduce_mean(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
atomicAdd(&(B[map[thread_id_x]]),A[thread_id_x]);
}
}
__global__ void reduce_op_sum(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]+=B[map[thread_id_x]];
}
}
__global__ void reduce_op_diff(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]-=B[map[thread_id_x]];
}
}
__global__ void reduce_op_mult(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]*=B[map[thread_id_x]];
}
}
__global__ void reduce_op_div(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]/=B[map[thread_id_x]];
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float sum=0;
float v,val;
int i;
int p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++) {
v=I[ind[p]];
if (m==2) {
if (j==0) {val=v;i=p;}
else if (v>val) {
val=v;
i=p;
}
}
else if (m==3) {
if (j==0) {val=v;i=p;}
else if (v<val) {
val=v;
i=p;
}
}
else sum+=v;
}
p=rs*blockIdx.x;
// set in Output
if (m<2) { // mean or sum
if (m==0) sum/=d;
if (keepdims) {
for(j=0;j<rs;j++,p++)
O[ind[p]]=sum;
}
else O[thread_id_x]=sum;
}
  else { // max or min
if (keepdims) {
for(j=0;j<rs;j++,p++) {
O[ind[p]]=val;
S[ind[p]]=i;
}
}
else {
O[thread_id_x]=val;
S[thread_id_x]=i;
}
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_back_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float val=0;
int p;
// set in Delta
if (m>=2) {
int p=S[thread_id_x];
O[p]+=I[thread_id_x];
}
else {
p=rs*blockIdx.x;
if(keepdims) {
for(j=0;j<rs;j++,p++)
val+=I[ind[p]];
}
else val=I[thread_id_x];
if (m==0) val/=d;
p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++)
O[ind[p]]+=val;
}
}
////////////////////
// FOR SUM and MEAN
// Faster in Conv
///////////////////
//dim3 dimGrid(red_size);
//dim3 dimBlock(RD->index.size());
__global__ void reduction_permute(float *I,float *O,int *ind,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size)
O[thread_id_x]=I[ind[thread_id_x]];
}
__global__ void reduction_kernel_keep(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]=red[thread_id_x/rsize];
}
}
__global__ void reduction_kernel_keep_inc(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]+=red[thread_id_x/rsize];
}
}
|
805723b36614e3530d49af5171aee80a332cc660.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
#define CHECK(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
cout<< "Error:" << hipGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
//calculate<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, dev_result, elementsQuantity, rowsY, colsX, border_number); <<<number of blocks in the grid, number of threads>>>
// + 255 so that all of the data is guaranteed to fit
// the array size is limited by the maximum thread-space size = 256
__global__ void calculate(int *dev_original_matrix, int *dev_result, int elementsQuantity, int rowsY, int colsX, int border_number)
{
    int current_row_number = threadIdx.x + blockIdx.x * blockDim.x; // row number in the image
    // Grid -> block -> thread (a thread inside a block, a block inside the grid); each thread runs calculate once
    // blockIdx - block index in the 1D grid, blockDim - number of threads per block
int r;
int count_points = 0;
    for (int i = 1; i < colsX - 1; i++) // the edge pixels are not counted
{
        //r = matrix[i * colsX + j - 1] - matrix[i * colsX + j + 1]; // maybe this should be divided by 2, but this is how Gonzalez does it (see the Sobel gradient);
r = dev_original_matrix[current_row_number * colsX + i - 1] - dev_original_matrix[current_row_number * colsX + i + 1];
if (r > border_number)
{
count_points++;
}
}
dev_result[current_row_number] = count_points;
//cout << "dev_result in GPU claculate :" << "\n";
/*
for (int i = 0; i < current_row_number; i++)
{
cout << dev_result[i] << " ";
}
cout << '\n';
*/
}
//STORE THE RESULT IN A LOCAL VARIABLE, THEN WRITE IT TO THE RESULT MATRIX
__global__ void goodCalculation(int *dev_original_matrix, int *dev_result, int elementsQuantity,
int rowsY, int colsX, int border_number)
{
int rowsCountBeforeOrangeLine = blockIdx.x * blockDim.x;
//int bigRowNumber = blockIdx.x * blockDim.x + threadIdx.x;
int cacheWidth = 32; // original
int rectangleHeight = 8; // original
//int rectangleInRowQuantity = colsX / cacheWidth; // original
int rectangleInRowQuantity = (colsX - 2) / (cacheWidth - 2);
__shared__ int cache[256][33];
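    // 256 threads stage 32-wide tiles into shared memory; the 33-column pitch (one spare
    // column) is presumably there to keep rows in different shared-memory banks and avoid bank conflicts.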
int r;
int count_points = 0;
    int rowInCache = threadIdx.x / cacheWidth; // row index in the top green rectangle (its first element)
int currentRowInCache = rowInCache;
int columnInCache = threadIdx.x % cacheWidth;
int pixelCountUpperRowInTopGreenRect = (rowsCountBeforeOrangeLine + rowInCache) * colsX;
int indexTopPixelInCurrentFPInsideImage = pixelCountUpperRowInTopGreenRect + columnInCache;
int verticalStep = rectangleHeight * colsX;
for (int stringIteration = 0; stringIteration < rectangleInRowQuantity; stringIteration++)
{
int currentPixelInImage = indexTopPixelInCurrentFPInsideImage;
for (int levelInCache = 0; levelInCache < cacheWidth; levelInCache++)
{
cache[currentRowInCache][columnInCache] = dev_original_matrix[currentPixelInImage];
currentRowInCache += rectangleHeight;
            currentPixelInImage += verticalStep; // step down the green rectangle by verticalStep
}
        indexTopPixelInCurrentFPInsideImage += 30; // move on to the next FP (tile)
currentRowInCache = rowInCache;
__syncthreads();
r = 0;
        // incorrect memory accesses used to start here - fixed
for (int i = 1; i < cacheWidth - 1; i++)
{
r = cache[threadIdx.x][i - 1] - cache[threadIdx.x][i + 1];
            if (r > border_number) // bug
count_points = count_points + 1;
}
__syncthreads();
}
    dev_result[rowsCountBeforeOrangeLine + threadIdx.x] = count_points; // incorrect memory access bug - fixed
}
void printMatrix(int* matrix, int colsX, int rowsY)
{
for (int i = 0; i < rowsY; i++)
{
for (int j = 0; j < colsX; j++)
{
cout << matrix[i * colsX + j] << "\t";
}
cout << "\n";
}
}
bool checkResult(int* host_result, int* result, int colsX, int rowsY)
{
for (int i = 0; i < 20; i++)
{
cout << "host_result[ " << i << " ] = " << host_result[i] << '\n';
}
for (int i = 0; i < 20; i++)
{
cout << "result[ " << i << " ] = " << result[i] << '\n';
}
for (int i = 0; i < rowsY; i++)
{
if (host_result[i] != result[i])
{
//cout << "host_result[ " << i << " ] = " << host_result[i] << '\n';
//cout << "result[ " << i << " ] = " << result[i] << '\n';
return false;
}
}
return true;
}
int main(void)
{
hipEvent_t startCUDA, stopCUDA, startOptimalCUDA, stopOptimalCUDA;
clock_t startCPU;
float elapsedUsualTimeCUDA, elapsedTimeCPU, elapsedOptimalTime;
    // 13. Build a vertical-edge detector for a (grayscale) image.
    // For each row, the function must count the points where the horizontal derivative of the color exceeds a given value.
    // All images are stored in memory row by row.
    int colsX = 1502; // pixels: 30 * 50 + 2 = 1502
    int rowsY = 17920; // pixels: 256 * 70 = 17920
int elementsQuantity = colsX * rowsY;
cout << "Size in Mbs = " << elementsQuantity * sizeof(int) / 1048576.0 << "\n";
int *matrix = new int[elementsQuantity];
for (int i = 0; i < rowsY; i++)
{
for (int j = 0; j < colsX; j++)
{
matrix[i * colsX + j] = rand() % 255; // filling matrix
//matrix[i * colsX + j] = (i * colsX + j) * 10 * i;
}
}
//printMatrix(matrix, colsX, rowsY);
int border_number = 10; // -410
cout << "border_number = " << border_number << '\n';
startCPU = clock();
int *result = new int[rowsY];
//int *count_points = new int[rowsY];
int r;
int count_points;
for (int i = 0; i < rowsY; i++) // alg CPU func
{
//int r = 0;
//int count_points = 0;
count_points = 0;
for (int j = 1; j < colsX - 1; j++)
{
//r = r + matrix[i * colsX + j]; // original
            r = matrix[i * colsX + j - 1] - matrix[i * colsX + j + 1]; // maybe this should be divided by 2, but this is how Gonzalez does it (see the Sobel gradient);
            //dI = dy/dx -> we only look for vertical edges, so only the increment along x is taken
//cout << "r = " << r << "\n";
if (r > border_number)
{
//cout << "r = " << r << "\n";
//cout << "found one" << "\n";
count_points++;
}
}
result[i] = count_points;
//cout << "in " << i << " row found " << result[i] << " points" << "\n";
}
/*
cout << "result in CPU :" << "\n";
for (int i = 0; i < rowsY; i++)
{
cout << result[i] << " ";
}
cout << '\n';
*/
clock_t end = clock();
elapsedTimeCPU = (double)(end-startCPU)/CLOCKS_PER_SEC;
cout << "CPU calculating time = " << elapsedTimeCPU * 1000 << " ms\n";
cout << "CPU memory throughput = " << elementsQuantity *sizeof(int)/elapsedTimeCPU/1024/1024/1024 << " Gb/s\n";
cout << "\n";
hipEventCreate(&startCUDA);
hipEventCreate(&stopCUDA);
int *dev_original_matrix, *dev_result;
int *host_original_matrix, * host_result;
host_original_matrix = matrix;
host_result = new int[rowsY];
for (int i = 0; i < rowsY; i++)
{
host_result[i] = 0;
}
CHECK( hipMalloc(&dev_original_matrix, elementsQuantity * sizeof(int)));
CHECK( hipMalloc(&dev_result, rowsY * sizeof(int)));
CHECK( hipMemcpy(dev_original_matrix, host_original_matrix, elementsQuantity * sizeof(int), hipMemcpyHostToDevice));
CHECK( hipMemcpy(dev_result, host_result, rowsY * sizeof(int), hipMemcpyHostToDevice));
hipEventRecord(startCUDA, 0);
hipLaunchKernelGGL(( calculate), dim3((rowsY + 255) / 256), dim3(256), 0, 0, dev_original_matrix, dev_result, elementsQuantity, rowsY, colsX, border_number);
hipEventRecord(stopCUDA, 0);
cout << "FINISH" << '\n';
hipEventSynchronize(stopCUDA);
CHECK(hipGetLastError());
hipEventElapsedTime(&elapsedUsualTimeCUDA, startCUDA, stopCUDA);
cout << "CUDA sum time = " << elapsedUsualTimeCUDA << " ms\n";
cout << "CUDA memory throughput = " << elementsQuantity * sizeof(int) / elapsedUsualTimeCUDA/1024/1024/1.024 << " Gb/s\n";
CHECK( hipMemcpy(host_result, dev_result, rowsY * sizeof(int),hipMemcpyDeviceToHost));
/*
cout << '\n' << "host_result = " << '\n';
printMatrix(host_result, 1, rowsY);
cout << '\n' << "result = " << '\n';
printMatrix(result, 1, rowsY);
*/
cout << "result was correct " << checkResult(host_result, result, colsX, rowsY) << "\n";
cout << "Data size = " << (float)4 * elementsQuantity / 1024 / 1024 << "\n";
CHECK( hipFree(dev_original_matrix));
CHECK( hipFree(dev_result));
//}
///*
//**********************************************************************************************
    //GOOD MULTIPLICATION (the optimized version)
hipEventCreate(&startOptimalCUDA);
hipEventCreate(&stopOptimalCUDA);
int* good_host_result = new int[rowsY];
for (int i = 0; i < rowsY; i++)
{
good_host_result[i] = 0; // 0
}
int *good_dev_result;
CHECK( hipMalloc(&dev_original_matrix, elementsQuantity * sizeof(int)));
CHECK( hipMalloc(&good_dev_result,rowsY * sizeof(int)));
CHECK( hipMemcpy(dev_original_matrix, host_original_matrix, elementsQuantity * sizeof(int), hipMemcpyHostToDevice));
CHECK( hipMemcpy(good_dev_result, good_host_result, rowsY * sizeof(int), hipMemcpyHostToDevice));
hipEventRecord(startOptimalCUDA, 0);
hipLaunchKernelGGL(( goodCalculation), dim3((rowsY + 255) / 256), dim3(256), 0, 0, dev_original_matrix, good_dev_result, elementsQuantity, rowsY, colsX, border_number);
//cout << '\n' << "good_host_result = " << '\n';
//printMatrix(good_host_result, 1, rowsY);
    //cout << '\n' << "good_dev_result = " << '\n'; // is good_dev_result empty?
//printMatrix(good_dev_result, 1, rowsY);
hipEventRecord(stopOptimalCUDA, 0);
CHECK( hipMemcpy(good_host_result, good_dev_result, rowsY * sizeof(int),hipMemcpyDeviceToHost));
cout << ("OPTIMAL SUMMATION WAS FINISHED");
hipEventElapsedTime(&elapsedOptimalTime, startOptimalCUDA, stopOptimalCUDA);
cout << "CUDA GOOD (OPTIMAL) sum time = " << elapsedOptimalTime << " ms\n";
cout << "CUDA GOOD (OPTIMAL) memory throughput = " << elementsQuantity * sizeof(int) / elapsedOptimalTime/1024/1024/1.024 << " Gb/s\n";
//cout << '\n' << "good_host_result = " << '\n';
//printMatrix(good_host_result, 1, rowsY);
cout << "result was correct" << checkResult(good_host_result, result, colsX, rowsY) << "\n";
cout << "Data size = " << (float)4 * elementsQuantity / 1024 / 1024 << "\n"; // float original, ok
CHECK( hipFree(dev_original_matrix));
CHECK( hipFree(good_dev_result));
return 0;
}
//*/
|
805723b36614e3530d49af5171aee80a332cc660.cu
|
#include <iostream>
using namespace std;
#define CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
//calculate<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, dev_result, elementsQuantity, rowsY, colsX, border_number); <<<number of blocks in the grid, number of threads>>>
// + 255 so that all of the data is guaranteed to fit
// the array size is limited by the maximum thread-space size = 256
__global__ void calculate(int *dev_original_matrix, int *dev_result, int elementsQuantity, int rowsY, int colsX, int border_number)
{
    int current_row_number = threadIdx.x + blockIdx.x * blockDim.x; // row number in the image
    // Grid -> block -> thread (a thread inside a block, a block inside the grid); each thread runs calculate once
    // blockIdx - block index in the 1D grid, blockDim - number of threads per block
int r;
int count_points = 0;
    for (int i = 1; i < colsX - 1; i++) // the edge pixels are not counted
{
        //r = matrix[i * colsX + j - 1] - matrix[i * colsX + j + 1]; // maybe this should be divided by 2, but this is how Gonzalez does it (see the Sobel gradient);
r = dev_original_matrix[current_row_number * colsX + i - 1] - dev_original_matrix[current_row_number * colsX + i + 1];
if (r > border_number)
{
count_points++;
}
}
dev_result[current_row_number] = count_points;
//cout << "dev_result in GPU claculate :" << "\n";
/*
for (int i = 0; i < current_row_number; i++)
{
cout << dev_result[i] << " ";
}
cout << '\n';
*/
}
//STORE THE RESULT IN A LOCAL VARIABLE, THEN WRITE IT TO THE RESULT MATRIX
__global__ void goodCalculation(int *dev_original_matrix, int *dev_result, int elementsQuantity,
int rowsY, int colsX, int border_number)
{
int rowsCountBeforeOrangeLine = blockIdx.x * blockDim.x;
//int bigRowNumber = blockIdx.x * blockDim.x + threadIdx.x;
int cacheWidth = 32; // original
int rectangleHeight = 8; // original
//int rectangleInRowQuantity = colsX / cacheWidth; // original
int rectangleInRowQuantity = (colsX - 2) / (cacheWidth - 2);
__shared__ int cache[256][33];
int r;
int count_points = 0;
    int rowInCache = threadIdx.x / cacheWidth; // row index in the top green rectangle (its first element)
int currentRowInCache = rowInCache;
int columnInCache = threadIdx.x % cacheWidth;
int pixelCountUpperRowInTopGreenRect = (rowsCountBeforeOrangeLine + rowInCache) * colsX;
int indexTopPixelInCurrentFPInsideImage = pixelCountUpperRowInTopGreenRect + columnInCache;
int verticalStep = rectangleHeight * colsX;
for (int stringIteration = 0; stringIteration < rectangleInRowQuantity; stringIteration++)
{
int currentPixelInImage = indexTopPixelInCurrentFPInsideImage;
for (int levelInCache = 0; levelInCache < cacheWidth; levelInCache++)
{
cache[currentRowInCache][columnInCache] = dev_original_matrix[currentPixelInImage];
currentRowInCache += rectangleHeight;
            currentPixelInImage += verticalStep; // step down the green rectangle by verticalStep
}
        indexTopPixelInCurrentFPInsideImage += 30; // move on to the next FP (tile)
currentRowInCache = rowInCache;
__syncthreads();
r = 0;
        // incorrect memory accesses used to start here - fixed
for (int i = 1; i < cacheWidth - 1; i++)
{
r = cache[threadIdx.x][i - 1] - cache[threadIdx.x][i + 1];
            if (r > border_number) // bug
count_points = count_points + 1;
}
__syncthreads();
}
    dev_result[rowsCountBeforeOrangeLine + threadIdx.x] = count_points; // incorrect memory access bug - fixed
}
void printMatrix(int* matrix, int colsX, int rowsY)
{
for (int i = 0; i < rowsY; i++)
{
for (int j = 0; j < colsX; j++)
{
cout << matrix[i * colsX + j] << "\t";
}
cout << "\n";
}
}
bool checkResult(int* host_result, int* result, int colsX, int rowsY)
{
for (int i = 0; i < 20; i++)
{
cout << "host_result[ " << i << " ] = " << host_result[i] << '\n';
}
for (int i = 0; i < 20; i++)
{
cout << "result[ " << i << " ] = " << result[i] << '\n';
}
for (int i = 0; i < rowsY; i++)
{
if (host_result[i] != result[i])
{
//cout << "host_result[ " << i << " ] = " << host_result[i] << '\n';
//cout << "result[ " << i << " ] = " << result[i] << '\n';
return false;
}
}
return true;
}
int main(void)
{
cudaEvent_t startCUDA, stopCUDA, startOptimalCUDA, stopOptimalCUDA;
clock_t startCPU;
float elapsedUsualTimeCUDA, elapsedTimeCPU, elapsedOptimalTime;
    // 13. Build a vertical-edge detector for a (grayscale) image.
    // For each row, the function must count the points where the horizontal derivative of the color exceeds a given value.
    // All images are stored in memory row by row.
    int colsX = 1502; // pixels: 30 * 50 + 2 = 1502
    int rowsY = 17920; // pixels: 256 * 70 = 17920
int elementsQuantity = colsX * rowsY;
cout << "Size in Mbs = " << elementsQuantity * sizeof(int) / 1048576.0 << "\n";
int *matrix = new int[elementsQuantity];
for (int i = 0; i < rowsY; i++)
{
for (int j = 0; j < colsX; j++)
{
matrix[i * colsX + j] = rand() % 255; // filling matrix
//matrix[i * colsX + j] = (i * colsX + j) * 10 * i;
}
}
//printMatrix(matrix, colsX, rowsY);
int border_number = 10; // -410
cout << "border_number = " << border_number << '\n';
startCPU = clock();
int *result = new int[rowsY];
//int *count_points = new int[rowsY];
int r;
int count_points;
for (int i = 0; i < rowsY; i++) // alg CPU func
{
//int r = 0;
//int count_points = 0;
count_points = 0;
for (int j = 1; j < colsX - 1; j++)
{
//r = r + matrix[i * colsX + j]; // original
            r = matrix[i * colsX + j - 1] - matrix[i * colsX + j + 1]; // maybe this should be divided by 2, but this is how Gonzalez does it (see the Sobel gradient);
            //dI = dy/dx -> we only look for vertical edges, so only the increment along x is taken
//cout << "r = " << r << "\n";
if (r > border_number)
{
//cout << "r = " << r << "\n";
//cout << "found one" << "\n";
count_points++;
}
}
result[i] = count_points;
//cout << "in " << i << " row found " << result[i] << " points" << "\n";
}
/*
cout << "result in CPU :" << "\n";
for (int i = 0; i < rowsY; i++)
{
cout << result[i] << " ";
}
cout << '\n';
*/
clock_t end = clock();
elapsedTimeCPU = (double)(end-startCPU)/CLOCKS_PER_SEC;
cout << "CPU calculating time = " << elapsedTimeCPU * 1000 << " ms\n";
cout << "CPU memory throughput = " << elementsQuantity *sizeof(int)/elapsedTimeCPU/1024/1024/1024 << " Gb/s\n";
cout << "\n";
cudaEventCreate(&startCUDA);
cudaEventCreate(&stopCUDA);
int *dev_original_matrix, *dev_result;
int *host_original_matrix, * host_result;
host_original_matrix = matrix;
host_result = new int[rowsY];
for (int i = 0; i < rowsY; i++)
{
host_result[i] = 0;
}
CHECK( cudaMalloc(&dev_original_matrix, elementsQuantity * sizeof(int)));
CHECK( cudaMalloc(&dev_result, rowsY * sizeof(int)));
CHECK( cudaMemcpy(dev_original_matrix, host_original_matrix, elementsQuantity * sizeof(int), cudaMemcpyHostToDevice));
CHECK( cudaMemcpy(dev_result, host_result, rowsY * sizeof(int), cudaMemcpyHostToDevice));
cudaEventRecord(startCUDA, 0);
calculate<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, dev_result, elementsQuantity, rowsY, colsX, border_number);
cudaEventRecord(stopCUDA, 0);
cout << "FINISH" << '\n';
cudaEventSynchronize(stopCUDA);
CHECK(cudaGetLastError());
cudaEventElapsedTime(&elapsedUsualTimeCUDA, startCUDA, stopCUDA);
cout << "CUDA sum time = " << elapsedUsualTimeCUDA << " ms\n";
cout << "CUDA memory throughput = " << elementsQuantity * sizeof(int) / elapsedUsualTimeCUDA/1024/1024/1.024 << " Gb/s\n";
CHECK( cudaMemcpy(host_result, dev_result, rowsY * sizeof(int),cudaMemcpyDeviceToHost));
/*
cout << '\n' << "host_result = " << '\n';
printMatrix(host_result, 1, rowsY);
cout << '\n' << "result = " << '\n';
printMatrix(result, 1, rowsY);
*/
cout << "result was correct " << checkResult(host_result, result, colsX, rowsY) << "\n";
cout << "Data size = " << (float)4 * elementsQuantity / 1024 / 1024 << "\n";
CHECK( cudaFree(dev_original_matrix));
CHECK( cudaFree(dev_result));
//}
///*
//**********************************************************************************************
    //GOOD MULTIPLICATION (the optimized version)
cudaEventCreate(&startOptimalCUDA);
cudaEventCreate(&stopOptimalCUDA);
int* good_host_result = new int[rowsY];
for (int i = 0; i < rowsY; i++)
{
good_host_result[i] = 0; // 0
}
int *good_dev_result;
CHECK( cudaMalloc(&dev_original_matrix, elementsQuantity * sizeof(int)));
CHECK( cudaMalloc(&good_dev_result,rowsY * sizeof(int)));
CHECK( cudaMemcpy(dev_original_matrix, host_original_matrix, elementsQuantity * sizeof(int), cudaMemcpyHostToDevice));
CHECK( cudaMemcpy(good_dev_result, good_host_result, rowsY * sizeof(int), cudaMemcpyHostToDevice));
cudaEventRecord(startOptimalCUDA, 0);
goodCalculation<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, good_dev_result, elementsQuantity, rowsY, colsX, border_number);
//cout << '\n' << "good_host_result = " << '\n';
//printMatrix(good_host_result, 1, rowsY);
    //cout << '\n' << "good_dev_result = " << '\n'; // is good_dev_result empty?
//printMatrix(good_dev_result, 1, rowsY);
cudaEventRecord(stopOptimalCUDA, 0);
CHECK( cudaMemcpy(good_host_result, good_dev_result, rowsY * sizeof(int),cudaMemcpyDeviceToHost));
cout << ("OPTIMAL SUMMATION WAS FINISHED");
cudaEventElapsedTime(&elapsedOptimalTime, startOptimalCUDA, stopOptimalCUDA);
cout << "CUDA GOOD (OPTIMAL) sum time = " << elapsedOptimalTime << " ms\n";
cout << "CUDA GOOD (OPTIMAL) memory throughput = " << elementsQuantity * sizeof(int) / elapsedOptimalTime/1024/1024/1.024 << " Gb/s\n";
//cout << '\n' << "good_host_result = " << '\n';
//printMatrix(good_host_result, 1, rowsY);
cout << "result was correct" << checkResult(good_host_result, result, colsX, rowsY) << "\n";
cout << "Data size = " << (float)4 * elementsQuantity / 1024 / 1024 << "\n"; // float original, ok
CHECK( cudaFree(dev_original_matrix));
CHECK( cudaFree(good_dev_result));
return 0;
}
//*/
|
acae645ace0671ecb57baa0427dac62e9beaed70.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <unistd.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include "custom_temporary_allocation.cuh"
#include "parameter.cuh"
using namespace std;
typedef hiprandStatePhilox4_32_10_t myCurandState_t;
//#define DEBUG
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(0); \
} \
}
#define FACTOR 5
#define ITERATIONS 100
#define TOTAL (SIZE * SIZE)
#define GRIDSIZE (SIZE+2)
#define GRIDTOTAL (SIZE+2)*(SIZE+2)
#define SRAND_VALUE 200
#define PENUMBER (TOTAL/PESIZE)
#define SAM_NUM_VALUES ((SIZE+2)*(SIZE+2))
#define SAM_PENUMBER (SAM_NUM_VALUES / SAM_PESIZE)
const int agentTypeOneNumber = agentNumber / 2;
const int agentTypeTwoNumber = agentNumber - agentTypeOneNumber;
const int happinessThreshold = 5;
void printOutput(int [SIZE+2][SIZE+2]);
void initPos(int grid [SIZE+2][SIZE+2]);
int random_location();
__device__ static const int FAK_LEN = 1024; // length of factorial table
__device__ int hyp_n_last[SAM_PENUMBER], hyp_m_last[SAM_PENUMBER], hyp_N_last[SAM_PENUMBER]; // Last values of parameters
__device__ int hyp_mode[SAM_PENUMBER], hyp_mp[SAM_PENUMBER]; // Mode, mode+1
__device__ int hyp_bound[SAM_PENUMBER]; // Safety upper bound
__device__ double hyp_a[SAM_PENUMBER]; // hat center
__device__ double hyp_h[SAM_PENUMBER]; // hat width
__device__ double hyp_fm[SAM_PENUMBER]; // Value at mode
__device__ int device_pe_inuse;
__device__ int device_num_inuse;
__device__ int device_removed_move_list_end;
__device__ int device_removed_space_list_end;
__device__ int device_penumber_inuse;
__device__ int device_reduced_pe_position;
__device__ float getnextrand(myCurandState_t *state){
return (hiprand_uniform(state));
}
__global__ void initSamCurand(myCurandState_t state[SAM_PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < SAM_PENUMBER){
hiprand_init(idx, 0 , 0, &state[idx]);
}
}
__device__ const double
C0 = 0.918938533204672722, // ln(sqrt(2*pi))
C1 = 1./12.,
C3 = -1./360.;
__device__ double fac_table[FAK_LEN];
__device__ int initialized = 0;
__device__ double LnFac(int n) {
if (n < FAK_LEN) {
if (n <= 1) {
if (n < 0) printf("Parameter negative in LnFac function\n");
return 0;
}
if (!initialized) { // first time. Must initialize table
// make table of ln(n!)
double sum = fac_table[0] = 0.;
for (int i=1; i<FAK_LEN; i++) {
sum += log(double(i));
fac_table[i] = sum;
}
initialized = 1;
}
return fac_table[n];
}
// not found in table. use Stirling approximation
double n1, r;
n1 = n; r = 1. / n1;
return (n1 + 0.5)*log(n1) - n1 + C0 + r*(C1 + r*r*C3);
//return logf(n);
}
__device__ double fc_lnpk(int k, int L, int m, int n) {
// subfunction used by hypergeometric and Fisher's noncentral hypergeometric distribution
return(LnFac(k) + LnFac(m - k) + LnFac(n - k) + LnFac(L + k));
}
__device__ int HypInversionMod (myCurandState_t stateHyper[SAM_PENUMBER],int n, int m, int N, int idx) {
/*
Subfunction for Hypergeometric distribution. Assumes 0 <= n <= m <= N/2.
Overflow protection is needed when N > 680 or n > 75.
Hypergeometric distribution by inversion method, using down-up
search starting at the mode using the chop-down technique.
This method is faster than the rejection method when the variance is low.
*/
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Sampling
int I; // Loop counter
int L = N - m - n; // Parameter
double modef; // mode, float
double Mp, np; // m + 1, n + 1
double p; // temporary
double U; // uniform random
double c, d; // factors in iteration
double divisor; // divisor, eliminated by scaling
double k1, k2; // float version of loop counter
double L1 = L; // float version of L
Mp = (double)(m + 1);
np = (double)(n + 1);
if (N != hyp_N_last[idx] || m != hyp_m_last[idx] || n != hyp_n_last[idx]) {
// set-up when parameters have changed
hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n;
p = Mp / (N + 2.);
modef = np * p; // mode, real
hyp_mode[idx] = (int)modef; // mode, integer
if (hyp_mode[idx] == modef && p == 0.5) {
hyp_mp[idx] = hyp_mode[idx]--;
}
else {
hyp_mp[idx] = hyp_mode[idx] + 1;
}
// mode probability, using log factorial function
// (may read directly from fac_table if N < FAK_LEN)
hyp_fm[idx] = exp(LnFac(N-m) - LnFac(L+hyp_mode[idx]) - LnFac(n-hyp_mode[idx])
+ LnFac(m) - LnFac(m-hyp_mode[idx]) - LnFac(hyp_mode[idx])
- LnFac(N) + LnFac(N-n) + LnFac(n) );
// safety bound - guarantees at least 17 significant decimal digits
// bound = min(n, (int)(modef + k*c'))
hyp_bound[idx] = (int)(modef + 11. * sqrt(modef * (1.-p) * (1.-n/(double)N)+1.));
if (hyp_bound[idx] > n) hyp_bound[idx] = n;
}
// loop until accepted
//int max_iterations = 1000;
while(1) {
// if(!(max_iterations--))
// break;
U = getnextrand(&stateHyper[idx]); // uniform random number to be converted
//printf(" U is %lf\n",U);
// start chop-down search at mode
if ((U -= hyp_fm[idx]) <= 0.) return(hyp_mode[idx]);
c = d = hyp_fm[idx];
// alternating down- and upward search from the mode
k1 = hyp_mp[idx] - 1; k2 = hyp_mode[idx] + 1;
for (I = 1; I <= hyp_mode[idx]; I++, k1--, k2++) {
// if(!(max_iterations--))
// break;
// Downward search from k1 = hyp_mp - 1
divisor = (np - k1)*(Mp - k1);
// Instead of dividing c with divisor, we multiply U and d because
// multiplication is faster. This will give overflow if N > 800
U *= divisor; d *= divisor;
c *= k1 * (L1 + k1);
if ((U -= c) <= 0.) return(hyp_mp[idx] - I - 1); // = k1 - 1
//printf("Line 228 I %d \n",I);
// Upward search from k2 = hyp_mode + 1
divisor = k2 * (L1 + k2);
// re-scale parameters to avoid time-consuming division
U *= divisor; c *= divisor;
d *= (np - k2) * (Mp - k2);
if ((U -= d) <= 0.) return(hyp_mode[idx] + I); // = k2
// Values of n > 75 or N > 680 may give overflow if you leave out this..
// overflow protection
// if (U > 1.E100) {U *= 1.E-100; c *= 1.E-100; d *= 1.E-100;}
}
// Upward search from k2 = 2*mode + 1 to bound
for (k2 = I = hyp_mp[idx] + hyp_mode[idx]; I <= hyp_bound[idx]; I++, k2++) {
//if(!(max_iterations--))
// break;
divisor = k2 * (L1 + k2);
U *= divisor;
d *= (np - k2) * (Mp - k2);
if ((U -= d) <= 0.) return(I);
// more overflow protection
// if (U > 1.E100) {U *= 1.E-100; d *= 1.E-100;}
}
}
}
__device__ int HypRatioOfUnifoms (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) {
/*
Subfunction for Hypergeometric distribution using the ratio-of-uniforms
rejection method.
This code is valid for 0 < n <= m <= N/2.
The computation time hardly depends on the parameters, except that it matters
a lot whether parameters are within the range where the LnFac function is
tabulated.
Reference: E. Stadlober: "The ratio of uniforms approach for generating
discrete random variates". Journal of Computational and Applied Mathematics,
vol. 31, no. 1, 1990, pp. 181-189.
*/
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
const double SHAT1 = 2.943035529371538573; // 8/e
const double SHAT2 = 0.8989161620588987408; // 3-sqrt(12/e)
int L; // N-m-n
int mode; // mode
int k; // integer sample
double x; // real sample
double rNN; // 1/(N*(N+2))
double my; // mean
double var; // variance
double u; // uniform random
double lf; // ln(f(x))
L = N - m - n;
if (hyp_N_last[idx] != N || hyp_m_last[idx] != m || hyp_n_last[idx] != n) {
hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n; // Set-up
rNN = 1. / ((double)N*(N+2)); // make two divisions in one
my = (double)n * m * rNN * (N+2); // mean = n*m/N
mode = (int)(double(n+1) * double(m+1) * rNN * N); // mode = floor((n+1)*(m+1)/(N+2))
var = (double)n * m * (N-m) * (N-n) / ((double)N*N*(N-1));// variance
hyp_h[idx] = sqrt(SHAT1 * (var+0.5)) + SHAT2; // hat width
hyp_a[idx] = my + 0.5; // hat center
hyp_fm[idx] = fc_lnpk(mode, L, m, n); // maximum
hyp_bound[idx] = (int)(hyp_a[idx] + 4.0 * hyp_h[idx]); // safety-bound
if (hyp_bound[idx] > n) hyp_bound[idx] = n;
}
while(1) {
u = getnextrand(&stateHyper[idx]); // uniform random number
if (u == 0) continue; // avoid division by 0
x = hyp_a[idx] + hyp_h[idx] * (getnextrand(&stateHyper[idx])-0.5) / u; // generate hat distribution
if (x < 0. || x > 2E9) continue; // reject, avoid overflow
k = (int)x;
if (k > hyp_bound[idx]) continue; // reject if outside range
lf = hyp_fm[idx] - fc_lnpk(k,L,m,n); // ln(f(k))
if (u * (4.0 - u) - 3.0 <= lf) break; // lower squeeze accept
if (u * (u-lf) > 1.0) continue; // upper squeeze reject
if (2.0 * log(u) <= lf) break; // final acceptance
}
return k;
}
__device__ int Hypergeometric (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) {
/*
This function generates a random variate with the hypergeometric
distribution. This is the distribution you get when drawing balls without
replacement from an urn with two colors. n is the number of balls you take,
m is the number of red balls in the urn, N is the total number of balls in
the urn, and the return value is the number of red balls you get.
This function uses inversion by chop-down search from the mode when
parameters are small, and the ratio-of-uniforms method when the former
method would be too slow or would give overflow.
*/
int fak, addd; // used for undoing transformations
int x; // result
hyp_n_last[idx] = hyp_m_last[idx] = hyp_N_last[idx] = -1; // Last values of hypergeometric parameters
// check if parameters are valid
if (n > N || m > N || n < 0 || m < 0) {
        printf("Parameter out of range in hypergeometric function n %d m %d N %d idx %d\n",n,m,N,idx);
printf("Parameter out of range in hypergeometric function %d,%d,%d,%d\n", n > N, m > N, n < 0, m < 0);
return 0;
}
// symmetry transformations
fak = 1; addd = 0;
if (m > N/2) {
// invert m
m = N - m;
fak = -1; addd = n;
}
if (n > N/2) {
// invert n
n = N - n;
addd += fak * m; fak = - fak;
}
if (n > m) {
// swap n and m
x = n; n = m; m = x;
}
// cases with only one possible result end here
if (n == 0) return addd;
//------------------------------------------------------------------
// choose method
//------------------------------------------------------------------
if (N > 680 || n > 70) {
// use ratio-of-uniforms method
x = HypRatioOfUnifoms (stateHyper, n, m, N,idx);
}
else {
// inversion method, using chop-down search from mode
x = HypInversionMod (stateHyper, n, m, N,idx);
}
// undo symmetry transformations
return x * fak + addd;
}
__global__ void clearSamples(int samples[SAM_NUM_VALUES]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < (SAM_NUM_VALUES)){
samples[idx] = 0;
}
}
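// Sampling method A (sequential skip search), the simpler companion of Vitter's method D
// implemented in sample() further below; sample() falls back to it once the remaining
// population is small relative to the number of samples still needed.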
__device__ void methodA(myCurandState_t state[SAM_PENUMBER],int N, int n, int num_sample, int initialTocurrent,int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES]) {
//ASSERT_LEQ(n, N);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Initialization
int sample = 0;
double Nreal = (double) N;
double top = Nreal - n;
// Main loop
while (n >= 2) {
int S = 0;
double V = getnextrand(&state[idx]);
double quot = top / Nreal;
while (quot > V) {
S++;
top -= 1.0;
Nreal -= 1.0;
quot = (quot * top) / Nreal;
}
// Skip over next S records and select the following one
sample += S + 1;
//samples[idx][num_sample++] = sample + initialTocurrent;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1];
//callback(sample);
Nreal -= 1.0;
n--;
}
if (n == 1) {
int S = round(Nreal) * getnextrand(&state[idx]);
sample += S + 1;
//samples[idx][num_sample++] = sample + initialTocurrent;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1];
//callback(sample);
}
}
// Sampling method D from Vitter et al.
//
// \param state Per-PE curand states (source of uniform variates).
// \param N Size of this PE's population.
// \param n Number of samples.
// \param device_list Values to sample from.
// \param samples Output array that receives the selected values.
//
__device__ void sample(myCurandState_t state[SAM_PENUMBER], int N, int n, int device_list[SAM_NUM_VALUES], int samples[SAM_NUM_VALUES]) {
//ASSERT_LEQ(n, N);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int initialN = N;
// Initialization
int sample = 0;
int num_sample = 0;
double nreal = (double) n;
double ninv = 1.0 / nreal;
double Nreal = (double) N;
double Vprime = exp(log(getnextrand(&state[idx])) * ninv);
int qu1 = N + 1 - n;
double qu1real = Nreal + 1.0 - nreal;
int negalphainv = -13;
int threshold = n * (-negalphainv);
int S = 0;
// Main loop
while (n > 1 && threshold < N) {
double nmin1inv = 1.0 / (nreal - 1.0);
double negSreal = 0.0;
while (true) {
// Step D2: Generate U and X
double X;
while (true) {
X = Nreal * (1.0 - Vprime);
S = X;
if (S < qu1) break;
Vprime = exp(log(getnextrand(&state[idx])) * ninv);
}
double U = getnextrand(&state[idx]);
negSreal = -(double)S;
// Step D3: Accept?
double y1 = exp(log(U * Nreal / qu1real) * nmin1inv);
Vprime = y1 * (-X / Nreal + 1.0) * (qu1real / (negSreal + qu1real));
if (Vprime <= 1.0) break; // Accept!
// Step D4: Accept?
double y2 = 1.0; double top = Nreal - 1.0;
double bottom;
double limit;
if (n - 1 > S) {
bottom = Nreal - nreal;
limit = N - S;
} else {
bottom = negSreal + Nreal - 1.0;
limit = qu1;
}
for (int t = N; t > limit; t--) {
y2 = (y2 * top) / bottom;
top -= 1.0;
bottom -= 1.0;
}
if (Nreal / (Nreal - X) >= y1 * exp(log(y2) * nmin1inv)) {
// Accept!
Vprime = exp(log(getnextrand(&state[idx])) * nmin1inv);
break;
}
Vprime = exp(log(getnextrand(&state[idx])) * ninv);
}
// Skip over next S records and select the following one
sample += S + 1;
//samples[idx][num_sample++] = sample;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1];
//callback(sample);
N = (N - 1) - S;
Nreal = (Nreal - 1.0) + negSreal;
n--;
nreal -= 1.0;
ninv = nmin1inv;
qu1 -= S;
qu1real += negSreal;
threshold += negalphainv;
}
if (n > 1) {
int currentN = N;
methodA(state, N, n, num_sample, initialN - currentN, device_list,samples);
//samples[num_sample++] = sample + initialN - currentN;
//methodA(N, n, [&](int sample) {
// callback(sample + initialN - currentN);
//});
} else if (n == 1) {
S = N * Vprime;
// Skip over next S records and select the following one
sample += S + 1;
//samples[idx][num_sample++] = sample;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1];
//callback(sample);
}
}
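/*
   sampleP distributes a global sample of size n across the PEs by a
   divide-and-conquer split. Every thread walks the same binary recursion over
   the PE range [j, k]: at each level a hypergeometric draw decides how many of
   the current n samples fall into the left half, and the seed (starting at 1,
   then 2*seed or 2*seed+1) keeps all threads that share a subtree seeing the
   same draw. Once j == k the thread knows how many values to pick from its own
   block and calls sample() with that count.
*/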
__global__ void sampleP(myCurandState_t state[SAM_PENUMBER], myCurandState_t stateHyper[SAM_PENUMBER], int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES], int n, int j, int k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
//idx += 1;
if(idx < device_pe_inuse){
int seed = 1;
//int counter = 0;
int m,x;
while(j - k != 0) {
hiprand_init(seed, 0 , 0, &stateHyper[idx]);
m = floor( (j+k)/2.0 );
//printf("sampleP1 n %d idx %d m %d\n",n,idx,m);
//__device__ int Hypergeometric (hiprandState_t stateHyper[PENUMBER],
//int n, int m, int N, int idx) {
/*
This function generates a random variate with the hypergeometric
distribution. This is the distribution you get when drawing balls without
replacement from an urn with two colors. n is the number of balls you take,
m is the number of red balls in the urn, N is the total number of balls in
the urn, and the return value is the number of red balls you get. */
//printf("would call Hypergeometric(stateHyper, %d, %d, %d, %d)\n", n, (m-j)*PESIZE + 1, (k-j)*PESIZE + 1, idx);
//printf("j is now %d, k is %d, m is %d, sums are %d and %d\n", j, k, m, k - (j - 1), m - (j - 1));
if(k != device_pe_inuse - 1){
x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, (k-(j-1))*SAM_PESIZE, idx);
}
else{
x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, ((k-1)-(j-1))*SAM_PESIZE + device_num_inuse % SAM_PESIZE, idx);
}
//printf("sampleP2 n %d idx %d x %d\n",n,idx,x);
//int x = m;
if(idx <= m) {
n = x;
k = m;
seed = seed * 2;
} else {
n = n-x;
j = m + 1;
seed = seed * 2 + 1;
}
}
//printf("sample n %d \n",n);
if(idx != device_pe_inuse - 1 ) {
//printf("idx %d sampling %d values\n", idx, n);
sample(state, SAM_PESIZE, n, device_list, samples);
}
else {
//printf("n > PESIZE %d \n",n);
sample(state, device_num_inuse % SAM_PESIZE, n, device_list, samples);
}
/*if(n <= PESIZE ) {
//printf("idx %d sampling %d values\n", idx, n);
sample(state, PESIZE, n, device_list, samples);
}
else {
printf("n > PESIZE %d \n",n);
}*/
}
}
//__global__ void print_device_reduced_pe_position(){
//printf("reduced_pe_position %d \n",( int( 0.5 + ceil((float)device_reduced_pe_position / (PESIZE) )) ) );
//printf("device_reduced_pe_position %d \n",(device_reduced_pe_position ) );
//}
__global__ void initCurand(myCurandState_t state[][PESIZE]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
if(idx < PENUMBER && idy<PESIZE){
hiprand_init(idx*(PESIZE)+idy,0 , 0, &state[idx][idy]);
}
}
__global__ void compute(int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list, int iteration){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int sameTypeCount=0;
int current_id = idx*(SIZE+2)+idy;
if(grid[idx][idy] != 0){
int currentType = grid[idx][idy];
if(grid[idx-1][idy-1] == currentType){
sameTypeCount += 1;
}
if(grid[idx-1][idy] == currentType){
sameTypeCount += 1;
}
if(grid[idx-1][idy+1] == currentType){
sameTypeCount += 1;
}
if(grid[idx][idy-1] == currentType){
sameTypeCount += 1;
}
if(grid[idx][idy+1] == currentType){
sameTypeCount += 1;
}
if(grid[idx+1][idy-1] == currentType){
sameTypeCount += 1;
}
if(grid[idx+1][idy] == currentType){
sameTypeCount += 1;
}
if(grid[idx+1][idy+1] == currentType){
sameTypeCount += 1;
}
if(sameTypeCount < happinessThreshold){
move_list[current_id] = current_id;
space_list[current_id] = current_id;
}
}
else if(idx != 0 && idy !=0 && idx != (SIZE+1) && idy != (SIZE+1) ){
space_list[current_id] = current_id;
}
}
__global__ void update (int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
grid[idy][idx] = new_grid[idy][idx];
move_list[idx*(SIZE+2)+idy] = 0;
space_list[idx*(SIZE+2)+idy] = 0;
}
__global__ void sendToRandomPerpe(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < device_penumber_inuse -1 ){
for(int i=0; i < PESIZE; i++ ){
float r = getnextrand(&state[idx][0]);
int random_position = r * (device_penumber_inuse-1);
int acquired_position = atomicAdd(&random_list_counter[random_position],1);
temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i];
}
}
else if(idx == device_penumber_inuse - 1 ){
for(int i=0; i < device_removed_move_list_end % PESIZE; i++ ){
float r = getnextrand(&state[idx][0]);
int random_position = r * (device_penumber_inuse-1);
int acquired_position = atomicAdd(&random_list_counter[random_position],1);
temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i];
}
}
}
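/*
   sendToRandom, sortList, randomPermute and recoverSize together build a
   random permutation of the compacted move_list: each element is scattered to
   a randomly chosen PE bucket (atomicAdd reserves the slot), every bucket is
   sorted so the result no longer depends on the atomicAdd ordering and then
   shuffled locally, and recoverSize packs the variable-length buckets back
   into fixed PESIZE-wide rows using the exclusive-scan offsets.
*/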
__global__ void sendToRandom(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx*PESIZE +idy < device_removed_move_list_end ){
float r = getnextrand(&state[idx][idy]);
int random_position = r * (device_penumber_inuse-1);
int acquired_position = atomicAdd(&random_list_counter[random_position],1);
temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+idy];
}
}
__global__ void clearCounter(int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < device_penumber_inuse){
random_list_counter[idx] = 0;
}
}
__global__ void generateList(int device_list[][PESIZE]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx*PESIZE +idy < device_removed_space_list_end ){
device_list[idx][idy] = idx*PESIZE +idy;
}
}
static __device__ void swap(int *data, int x, int y)
{
int temp = data[x];
data[x] = data[y];
data[y] = temp;
}
static __device__ int partition(int *data, int left, int right)
{
const int mid = left + (right - left) / 2;
const int pivot = data[(mid)];
swap(data, (mid), (left));
int i = left + 1;
int j = right;
while (i <= j) {
while (i <= j && data[(i)] <= pivot) {
i++;
}
while (i <= j && data[(j)] > pivot) {
j--;
}
if (i < j) {
swap(data, (i), (j));
}
}
swap(data, (i - 1), (left));
return i - 1;
}
typedef struct sort_data {
int left;
int right;
} sort_data;
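/*
   quicksort_seq is an iterative quicksort with an explicit stack so it can run
   inside a single device thread; the stack is dimensioned PESIZE*FACTOR, the
   maximum number of elements a bucket can hold, which also bounds the
   worst-case partition depth.
*/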
__device__ void quicksort_seq(int *data, int right)
{
int left = 0;
if(left == right)
return;
if (left > right) {
right = 1 + right;
}
int stack_size = 0;
sort_data stack[PESIZE*FACTOR];
stack[stack_size++] = { left, right };
while (stack_size > 0) {
int curr_left = stack[stack_size - 1].left;
int curr_right = stack[stack_size - 1].right;
stack_size--;
if (curr_left < curr_right) {
int part = partition(data, curr_left, curr_right);
stack[stack_size++] = {curr_left, part - 1};
stack[stack_size++] = {part + 1, curr_right};
}
}
}
__global__ void sortList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < device_penumber_inuse){
int number = random_list_counter[idx];
if(number != 0){
quicksort_seq(temp_device_list[idx], number - 1 );
}
}
}
__global__ void randomPermute(myCurandState_t state[][PESIZE], int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int reduced_pe = device_penumber_inuse;
if(idx < reduced_pe){
for (int i = 0; i < random_list_counter[idx]; i++){
float r = getnextrand(&state[idx][0]);
int j = r * (random_list_counter[idx]-1);
int temp = temp_device_list[idx][i] ;
temp_device_list[idx][i] = temp_device_list[idx][j] ;
temp_device_list[idx][j] = temp;
}
}
}
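/*
   recoverSize copies each bucket back into the dense per-PE layout. delta is
   the exclusive-scan prefix of the bucket sizes, so element i of bucket idx
   has global rank delta + i; the index expressions below reduce to
   (delta + i) / PESIZE for the destination row and (delta + i) % PESIZE for
   the column, just written in an expanded form.
*/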
__global__ void recoverSize(int device_list[][PESIZE], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER], int scanned_random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int reduced_pe = device_penumber_inuse;
if(idx < reduced_pe){
int delta = scanned_random_list_counter[idx];
for(int i=0; i<random_list_counter[idx]; i++){
int addValue = delta + i;
int interResult = device_penumber_inuse*addValue/(PESIZE*device_penumber_inuse);
device_list[interResult][(delta- (PESIZE*device_penumber_inuse/device_penumber_inuse)*interResult + i)] = temp_device_list[idx][i];
}
}
}
struct smaller_than
{
__device__
bool operator()(const int x)
{
return (x < device_removed_space_list_end) == 0;
}
};
struct greater_than
{
__device__
bool operator()(int x)
{
return x > device_removed_move_list_end;
}
};
__global__ void printTempList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){
for(int i =0; i<device_penumber_inuse; i++){
for(int j=0; j<random_list_counter[i];j++){
printf("%d ",temp_device_list[i][j]);
}
printf("\n");
}
}
__global__ void printList(int * list,int *removed_list_end){
printf( "SIZE %d \n",removed_list_end - list) ;
for(int i=0; i<removed_list_end - list; i++){
printf("%d ",list[i]);
}
printf("\n");
}
__global__ void printListPre(int * list){
printf( "SIZE %d \n",device_removed_space_list_end) ;
for(int i=0; i<device_removed_space_list_end; i++){
printf("%d ",list[i]);
}
printf("\n");
}
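/*
   prepareNewGrid and assign perform the actual relocation: for mover idx,
   permutation[][] holds the (shuffled) grid index of an unhappy agent and
   samples[idx] holds a 1-based index into the compacted space_list of
   candidate cells. prepareNewGrid first clears the mover's old cell in
   new_grid, then assign copies the agent's type into the sampled destination.
*/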
__global__ void prepareNewGrid (int new_grid[][SIZE+2], int * move_list, int permutation[][PESIZE]){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx<device_removed_move_list_end){
int idxTox = idx / PESIZE;
int idxToy = idx % PESIZE;
int agent_position = permutation[idxTox][idxToy];
new_grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)] = 0;
}
}
__global__ void assign (int grid[][SIZE+2], int new_grid[][SIZE+2], int permutation[][PESIZE], int * move_list, int * space_list, int samples[SAM_NUM_VALUES]){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx < (device_removed_move_list_end) ){
int idxTox = idx / PESIZE;
int idxToy = idx % PESIZE;
int space_position = space_list[samples[idx]-1];
int agent_position = permutation[idxTox][idxToy];
new_grid[space_position/(SIZE+2)][space_position%(SIZE+2)] = grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)];
}
}
__global__ void checkNumberDevice(int new_grid[][SIZE+2]){
int agentTypeOne = 0;
int agentTypeTwo = 0;
for(int i=0; i<SIZE+2; i++){
for(int j=0; j<SIZE+2; j++){
if(new_grid[i][j] == 1){
agentTypeOne +=1;
}
else if(new_grid[i][j] == 2){
agentTypeTwo += 1;
}
}
}
printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo);
}
void checkNumber(int grid [SIZE+2][SIZE+2]){
int agentTypeOne = 0;
int agentTypeTwo = 0;
for(int i=0; i<SIZE+2; i++){
for(int j=0; j<SIZE+2; j++){
if(grid[i][j] == 1){
agentTypeOne +=1;
}
else if(grid[i][j] == 2){
agentTypeTwo += 1;
}
}
}
printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo);
}
__global__ void devicePrintOutput(int device_list[][PESIZE]){
for(int i =0; i<device_penumber_inuse; i++){
//for(int j=0; j<random_list_counter[i];j++){
// printf("%d \n",i);
for(int j=0; j<PESIZE;j++){
//printf("PE %d, index %d, value %d\n", i, j, device_list[i][j]);
printf("%d ",device_list[i][j]);
}
printf("\n");
}
}
__global__ void initSamValue(int device_list[SAM_NUM_VALUES]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
device_list[idx] = idx+1;
}
__global__ void printSamples(int samples[SAM_NUM_VALUES]){
for(int i=0; i<(device_removed_move_list_end); i++){
printf("%d %d \n",i,samples[i]);
}
}
__global__ void printSamValue(int device_sam_list[SAM_NUM_VALUES]){
for(int i=0; i<(device_pe_inuse*SAM_PESIZE); i++){
printf("%d ",device_sam_list[i]);
}
}
int host_grid[SIZE+2][SIZE+2];
int main(int argc, char* argv[])
{
struct timespec start, stop;
double accum;
int (*device_grid)[SIZE + 2];
int (*device_newGrid)[SIZE + 2];
int (*device_permutation_list)[PESIZE];
int (*device_temp_permutation_list)[PESIZE*FACTOR];
int (*random_list_counter);
int (*scanned_random_list_counter);
int (*move_list);
int (*removed_move_list_end);
int (*space_list);
int (*removed_space_list_end);
int (*samples);
int (*device_sam_list);
srand(SRAND_VALUE);
size_t bytes = sizeof(int)*(SIZE + 2)*(SIZE + 2);
myCurandState_t (*devState)[PESIZE];
myCurandState_t (*devStateHyper);
myCurandState_t (*devStateSam);
hipMalloc((void**)&devState, TOTAL * sizeof(myCurandState_t));
hipMalloc(&random_list_counter, sizeof(int)*(PENUMBER));
hipMalloc(&scanned_random_list_counter, sizeof(int)*(PENUMBER));
hipMalloc(&device_sam_list, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER));
hipMalloc((void**)&device_grid, bytes);
hipMalloc((void**)&device_newGrid, bytes);
hipMalloc((void**)&device_permutation_list, sizeof(int)*(TOTAL));
hipMalloc((void**)&device_temp_permutation_list, sizeof(int)*(agentNumber)*FACTOR);
hipMalloc(&move_list, sizeof(int)*(SIZE + 2)*(SIZE + 2));
hipMalloc(&space_list, sizeof(int)*(SIZE + 2)*(SIZE + 2));
hipMalloc(&samples, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER));
hipMalloc(&devStateHyper, SAM_PENUMBER * sizeof(myCurandState_t));
hipMalloc(&devStateSam, SAM_PENUMBER * sizeof(myCurandState_t));
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
int blockSizeVerPermu = numThreadsPerBlock / PESIZE;
dim3 blockSizePermu(blockSizeVerPermu, PESIZE, 1);
hipLaunchKernelGGL(( initCurand), dim3((ceil(TOTAL/double(numThreadsPerBlock)))),dim3(blockSizePermu), 0, 0, devState);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
for (int i=0; i<(SIZE+2); i++){
for (int j=0; j<SIZE+2; j++){
host_grid[i][j] = 0;
}
}
int blockSizePerDim = sqrt(numThreadsPerBlock);
int gridSizePerDim = (SIZE + 2) / blockSizePerDim;
dim3 blockSize(blockSizePerDim, blockSizePerDim, 1);
dim3 gridSize(gridSizePerDim, gridSizePerDim, 1);
initPos(host_grid);
//printOutput(host_grid);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipMemcpy(device_grid,host_grid,bytes,hipMemcpyHostToDevice);
hipMemcpy(device_newGrid,host_grid,bytes,hipMemcpyHostToDevice);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
	hipLaunchKernelGGL(( initSamCurand), dim3(ceil((double)SAM_PENUMBER / SAM_numThreadsPerBlock)),dim3(SAM_numThreadsPerBlock), 0, 0, devStateSam); // round up so every state is initialized even when SAM_PENUMBER is not a multiple of the block size
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list);
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
cached_allocator alloc;
int removed_list_number = 0;
int space_list_number = 0;
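	/* One iteration of the simulation, as executed by the loop below:
	   1. compute marks unhappy agents (move_list) and candidate cells (space_list);
	   2. thrust::remove compacts both lists and their sizes are copied to device symbols;
	   3. sampleP draws one random candidate cell per mover (distributed sampling);
	   4. sendToRandom / sortList / randomPermute / recoverSize shuffle the movers;
	   5. prepareNewGrid clears the movers' old cells and assign relocates them;
	   6. update copies new_grid back into grid and resets the work lists. */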
for(int i=0; i<ITERATIONS; i++){
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
compute << <gridSize, blockSize >> >(device_grid, device_newGrid, move_list, space_list, i);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
removed_move_list_end = thrust::remove(thrust::hip::par(alloc), move_list, move_list + ((SIZE+2)*(SIZE+2)), 0);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
removed_list_number = removed_move_list_end - move_list;
hipMemcpyToSymbol(device_removed_move_list_end, &removed_list_number, sizeof(int));
int TwoDimGridSize = ceil(removed_list_number/double(numThreadsPerBlock));
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
removed_space_list_end = thrust::remove(thrust::hip::par(alloc), space_list, space_list + ((SIZE+2)*(SIZE+2)), 0);
space_list_number = removed_space_list_end - space_list;
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipMemcpyToSymbol(device_removed_space_list_end, &space_list_number, sizeof(int));
int penumberinuse = ceil(removed_list_number/ double(PESIZE));
hipMemcpyToSymbol(device_penumber_inuse, &penumberinuse, sizeof(int));
hipLaunchKernelGGL(( generateList), dim3(ceil(space_list_number/double(numThreadsPerBlock))),dim3(blockSizePermu), 0, 0, device_permutation_list);
int sam_num_inuse = space_list_number;
int sam_pe_inuse = ceil(double(sam_num_inuse) / SAM_PESIZE);
hipMemcpyToSymbol(device_pe_inuse, &sam_pe_inuse, sizeof(int));
hipMemcpyToSymbol(device_num_inuse, &sam_num_inuse, sizeof(int));
hipLaunchKernelGGL(( clearSamples), dim3(ceil(sam_pe_inuse*SAM_PESIZE / (double)SAM_numThreadsPerBlock)), dim3(SAM_numThreadsPerBlock), 0, 0, samples);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
int sam_gridSize = ceil((double)sam_pe_inuse / SAM_numThreadsPerBlock);
hipLaunchKernelGGL(( initSamValue), dim3(ceil(double(sam_num_inuse) / SAM_numThreadsPerBlock)), dim3(SAM_numThreadsPerBlock), 0, 0, device_sam_list);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipLaunchKernelGGL(( sampleP), dim3(sam_gridSize), dim3(SAM_numThreadsPerBlock), 0, 0, devStateSam, devStateHyper, device_sam_list, samples, removed_list_number, 0, sam_pe_inuse-1);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
int OneDimGridSize = ceil(penumberinuse / double(numThreadsPerBlock));
hipLaunchKernelGGL(( clearCounter), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, random_list_counter);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipLaunchKernelGGL(( sendToRandom), dim3(ceil(removed_list_number/double(numThreadsPerBlock))),dim3(blockSizePermu) , 0, 0, devState,move_list,device_temp_permutation_list,random_list_counter);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipLaunchKernelGGL(( sortList), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, device_temp_permutation_list,random_list_counter);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
thrust::exclusive_scan(thrust::hip::par(alloc), random_list_counter, random_list_counter + penumberinuse, scanned_random_list_counter);
hipLaunchKernelGGL(( randomPermute), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, devState,device_temp_permutation_list,random_list_counter);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipLaunchKernelGGL(( recoverSize), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, device_permutation_list, device_temp_permutation_list,random_list_counter,scanned_random_list_counter);
thrust::remove(thrust::device, samples, samples + sam_pe_inuse*SAM_PESIZE , 0);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipLaunchKernelGGL(( prepareNewGrid) , dim3(TwoDimGridSize), dim3(numThreadsPerBlock) , 0, 0, device_newGrid, move_list,device_permutation_list);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
hipLaunchKernelGGL(( assign) , dim3(TwoDimGridSize), dim3(numThreadsPerBlock), 0, 0, device_grid, device_newGrid, device_permutation_list, move_list, space_list,samples);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list);
#ifdef DEBUG
hipDeviceSynchronize();
cudaCheckError();
#endif
}
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
accum = ( stop.tv_sec - start.tv_sec ) * 1e6
+ ( stop.tv_nsec - start.tv_nsec ) / 1e3;
printf( "%.1f Time is %.5f s \n",float(OCCUPANCY), accum / 1e6);
hipMemcpy(host_grid, device_newGrid, bytes, hipMemcpyDeviceToHost);
//printOutput(host_grid);
//checkNumber(host_grid);
hipFree(device_grid);
hipFree(device_newGrid);
hipFree(device_permutation_list);
hipFree(device_temp_permutation_list);
hipFree(move_list);
hipFree(random_list_counter);
hipFree(scanned_random_list_counter);
hipFree(space_list);
hipFree(devState);
hipFree(samples);
hipFree(devStateSam);
hipFree(devStateHyper);
hipFree(device_sam_list);
return 0;
}
void printOutput(int grid [SIZE+2][SIZE+2] ){ //output grid cells 1 to SIZE (the border rows/columns are skipped)
for (int i=1; i<SIZE+1; i++){
for (int j=1; j<SIZE+1; j++){
printf("%d ",grid[i][j]);
//if(i%SIZE)
}
printf("\n");
}
printf("\n");
}
void initPos(int grid [SIZE+2][SIZE+2]){ // place type 1 and type 2 agents on randomly chosen empty cells
int row;
int column;
for(int i=0; i<agentTypeOneNumber; i++){
do{
row = random_location();
column = random_location();
}while(grid[row][column] != 0);
grid[row][column] = 1;
}
for(int i=0; i<agentTypeTwoNumber; i++){
do{
row = random_location();
column = random_location();
}while(grid[row][column] != 0);
grid[row][column] = 2;
}
}
int random_location() { //generate a random number from 1 to SIZE (inclusive)
int r;
r = rand();
return (r % (SIZE) +1 );
}
|
acae645ace0671ecb57baa0427dac62e9beaed70.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <math.h>
#include <curand_kernel.h>
#include <time.h>
#include <unistd.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include "custom_temporary_allocation.cuh"
#include "parameter.cuh"
using namespace std;
typedef curandStatePhilox4_32_10_t myCurandState_t;
//#define DEBUG
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(0); \
} \
}
#define FACTOR 5
#define ITERATIONS 100
#define TOTAL (SIZE * SIZE)
#define GRIDSIZE (SIZE+2)
#define GRIDTOTAL (SIZE+2)*(SIZE+2)
#define SRAND_VALUE 200
#define PENUMBER (TOTAL/PESIZE)
#define SAM_NUM_VALUES ((SIZE+2)*(SIZE+2))
#define SAM_PENUMBER (SAM_NUM_VALUES / SAM_PESIZE)
const int agentTypeOneNumber = agentNumber / 2;
const int agentTypeTwoNumber = agentNumber - agentTypeOneNumber;
const int happinessThreshold = 5;
void printOutput(int [SIZE+2][SIZE+2]);
void initPos(int grid [SIZE+2][SIZE+2]);
int random_location();
__device__ static const int FAK_LEN = 1024; // length of factorial table
__device__ int hyp_n_last[SAM_PENUMBER], hyp_m_last[SAM_PENUMBER], hyp_N_last[SAM_PENUMBER]; // Last values of parameters
__device__ int hyp_mode[SAM_PENUMBER], hyp_mp[SAM_PENUMBER]; // Mode, mode+1
__device__ int hyp_bound[SAM_PENUMBER]; // Safety upper bound
__device__ double hyp_a[SAM_PENUMBER]; // hat center
__device__ double hyp_h[SAM_PENUMBER]; // hat width
__device__ double hyp_fm[SAM_PENUMBER]; // Value at mode
__device__ int device_pe_inuse;
__device__ int device_num_inuse;
__device__ int device_removed_move_list_end;
__device__ int device_removed_space_list_end;
__device__ int device_penumber_inuse;
__device__ int device_reduced_pe_position;
__device__ float getnextrand(myCurandState_t *state){
return (curand_uniform(state));
}
__global__ void initSamCurand(myCurandState_t state[SAM_PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < SAM_PENUMBER){
curand_init(idx, 0 , 0, &state[idx]);
}
}
__device__ const double
C0 = 0.918938533204672722, // ln(sqrt(2*pi))
C1 = 1./12.,
C3 = -1./360.;
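// C0, C1 and C3 are the coefficients of the Stirling series used by LnFac
// below once n is outside the factorial table:
//   ln n! ~ (n + 1/2) ln n - n + ln sqrt(2*pi) + 1/(12 n) - 1/(360 n^3)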
__device__ double fac_table[FAK_LEN];
__device__ int initialized = 0;
__device__ double LnFac(int n) {
if (n < FAK_LEN) {
if (n <= 1) {
if (n < 0) printf("Parameter negative in LnFac function\n");
return 0;
}
if (!initialized) { // first time. Must initialize table
// make table of ln(n!)
double sum = fac_table[0] = 0.;
for (int i=1; i<FAK_LEN; i++) {
sum += log(double(i));
fac_table[i] = sum;
}
initialized = 1;
}
return fac_table[n];
}
// not found in table. use Stirling approximation
double n1, r;
n1 = n; r = 1. / n1;
return (n1 + 0.5)*log(n1) - n1 + C0 + r*(C1 + r*r*C3);
//return logf(n);
}
__device__ double fc_lnpk(int k, int L, int m, int n) {
// subfunction used by hypergeometric and Fisher's noncentral hypergeometric distribution
return(LnFac(k) + LnFac(m - k) + LnFac(n - k) + LnFac(L + k));
}
__device__ int HypInversionMod (myCurandState_t stateHyper[SAM_PENUMBER],int n, int m, int N, int idx) {
/*
Subfunction for Hypergeometric distribution. Assumes 0 <= n <= m <= N/2.
Overflow protection is needed when N > 680 or n > 75.
Hypergeometric distribution by inversion method, using down-up
search starting at the mode using the chop-down technique.
This method is faster than the rejection method when the variance is low.
*/
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Sampling
int I; // Loop counter
int L = N - m - n; // Parameter
double modef; // mode, float
double Mp, np; // m + 1, n + 1
double p; // temporary
double U; // uniform random
double c, d; // factors in iteration
double divisor; // divisor, eliminated by scaling
double k1, k2; // float version of loop counter
double L1 = L; // float version of L
Mp = (double)(m + 1);
np = (double)(n + 1);
if (N != hyp_N_last[idx] || m != hyp_m_last[idx] || n != hyp_n_last[idx]) {
// set-up when parameters have changed
hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n;
p = Mp / (N + 2.);
modef = np * p; // mode, real
hyp_mode[idx] = (int)modef; // mode, integer
if (hyp_mode[idx] == modef && p == 0.5) {
hyp_mp[idx] = hyp_mode[idx]--;
}
else {
hyp_mp[idx] = hyp_mode[idx] + 1;
}
// mode probability, using log factorial function
// (may read directly from fac_table if N < FAK_LEN)
hyp_fm[idx] = exp(LnFac(N-m) - LnFac(L+hyp_mode[idx]) - LnFac(n-hyp_mode[idx])
+ LnFac(m) - LnFac(m-hyp_mode[idx]) - LnFac(hyp_mode[idx])
- LnFac(N) + LnFac(N-n) + LnFac(n) );
// safety bound - guarantees at least 17 significant decimal digits
// bound = min(n, (int)(modef + k*c'))
hyp_bound[idx] = (int)(modef + 11. * sqrt(modef * (1.-p) * (1.-n/(double)N)+1.));
if (hyp_bound[idx] > n) hyp_bound[idx] = n;
}
// loop until accepted
//int max_iterations = 1000;
while(1) {
// if(!(max_iterations--))
// break;
U = getnextrand(&stateHyper[idx]); // uniform random number to be converted
//printf(" U is %lf\n",U);
// start chop-down search at mode
if ((U -= hyp_fm[idx]) <= 0.) return(hyp_mode[idx]);
c = d = hyp_fm[idx];
// alternating down- and upward search from the mode
k1 = hyp_mp[idx] - 1; k2 = hyp_mode[idx] + 1;
for (I = 1; I <= hyp_mode[idx]; I++, k1--, k2++) {
// if(!(max_iterations--))
// break;
// Downward search from k1 = hyp_mp - 1
divisor = (np - k1)*(Mp - k1);
// Instead of dividing c with divisor, we multiply U and d because
// multiplication is faster. This will give overflow if N > 800
U *= divisor; d *= divisor;
c *= k1 * (L1 + k1);
if ((U -= c) <= 0.) return(hyp_mp[idx] - I - 1); // = k1 - 1
//printf("Line 228 I %d \n",I);
// Upward search from k2 = hyp_mode + 1
divisor = k2 * (L1 + k2);
// re-scale parameters to avoid time-consuming division
U *= divisor; c *= divisor;
d *= (np - k2) * (Mp - k2);
if ((U -= d) <= 0.) return(hyp_mode[idx] + I); // = k2
// Values of n > 75 or N > 680 may give overflow if you leave out this..
// overflow protection
// if (U > 1.E100) {U *= 1.E-100; c *= 1.E-100; d *= 1.E-100;}
}
// Upward search from k2 = 2*mode + 1 to bound
for (k2 = I = hyp_mp[idx] + hyp_mode[idx]; I <= hyp_bound[idx]; I++, k2++) {
//if(!(max_iterations--))
// break;
divisor = k2 * (L1 + k2);
U *= divisor;
d *= (np - k2) * (Mp - k2);
if ((U -= d) <= 0.) return(I);
// more overflow protection
// if (U > 1.E100) {U *= 1.E-100; d *= 1.E-100;}
}
}
}
__device__ int HypRatioOfUnifoms (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) {
/*
Subfunction for Hypergeometric distribution using the ratio-of-uniforms
rejection method.
This code is valid for 0 < n <= m <= N/2.
The computation time hardly depends on the parameters, except that it matters
a lot whether parameters are within the range where the LnFac function is
tabulated.
Reference: E. Stadlober: "The ratio of uniforms approach for generating
discrete random variates". Journal of Computational and Applied Mathematics,
vol. 31, no. 1, 1990, pp. 181-189.
*/
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
const double SHAT1 = 2.943035529371538573; // 8/e
const double SHAT2 = 0.8989161620588987408; // 3-sqrt(12/e)
int L; // N-m-n
int mode; // mode
int k; // integer sample
double x; // real sample
double rNN; // 1/(N*(N+2))
double my; // mean
double var; // variance
double u; // uniform random
double lf; // ln(f(x))
L = N - m - n;
if (hyp_N_last[idx] != N || hyp_m_last[idx] != m || hyp_n_last[idx] != n) {
hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n; // Set-up
rNN = 1. / ((double)N*(N+2)); // make two divisions in one
my = (double)n * m * rNN * (N+2); // mean = n*m/N
mode = (int)(double(n+1) * double(m+1) * rNN * N); // mode = floor((n+1)*(m+1)/(N+2))
var = (double)n * m * (N-m) * (N-n) / ((double)N*N*(N-1));// variance
hyp_h[idx] = sqrt(SHAT1 * (var+0.5)) + SHAT2; // hat width
hyp_a[idx] = my + 0.5; // hat center
hyp_fm[idx] = fc_lnpk(mode, L, m, n); // maximum
hyp_bound[idx] = (int)(hyp_a[idx] + 4.0 * hyp_h[idx]); // safety-bound
if (hyp_bound[idx] > n) hyp_bound[idx] = n;
}
while(1) {
u = getnextrand(&stateHyper[idx]); // uniform random number
if (u == 0) continue; // avoid division by 0
x = hyp_a[idx] + hyp_h[idx] * (getnextrand(&stateHyper[idx])-0.5) / u; // generate hat distribution
if (x < 0. || x > 2E9) continue; // reject, avoid overflow
k = (int)x;
if (k > hyp_bound[idx]) continue; // reject if outside range
lf = hyp_fm[idx] - fc_lnpk(k,L,m,n); // ln(f(k))
if (u * (4.0 - u) - 3.0 <= lf) break; // lower squeeze accept
if (u * (u-lf) > 1.0) continue; // upper squeeze reject
if (2.0 * log(u) <= lf) break; // final acceptance
}
return k;
}
__device__ int Hypergeometric (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) {
/*
This function generates a random variate with the hypergeometric
distribution. This is the distribution you get when drawing balls without
replacement from an urn with two colors. n is the number of balls you take,
m is the number of red balls in the urn, N is the total number of balls in
the urn, and the return value is the number of red balls you get.
This function uses inversion by chop-down search from the mode when
parameters are small, and the ratio-of-uniforms method when the former
method would be too slow or would give overflow.
*/
int fak, addd; // used for undoing transformations
int x; // result
hyp_n_last[idx] = hyp_m_last[idx] = hyp_N_last[idx] = -1; // Last values of hypergeometric parameters
// check if parameters are valid
if (n > N || m > N || n < 0 || m < 0) {
printf("Parameter out of range in hypergeometric function n %ld m %ld N %ld idx %d\n",n,m,N,idx);
printf("Parameter out of range in hypergeometric function %d,%d,%d,%d\n", n > N, m > N, n < 0, m < 0);
return 0;
}
// symmetry transformations
fak = 1; addd = 0;
if (m > N/2) {
// invert m
m = N - m;
fak = -1; addd = n;
}
if (n > N/2) {
// invert n
n = N - n;
addd += fak * m; fak = - fak;
}
if (n > m) {
// swap n and m
x = n; n = m; m = x;
}
// cases with only one possible result end here
if (n == 0) return addd;
//------------------------------------------------------------------
// choose method
//------------------------------------------------------------------
if (N > 680 || n > 70) {
// use ratio-of-uniforms method
x = HypRatioOfUnifoms (stateHyper, n, m, N,idx);
}
else {
// inversion method, using chop-down search from mode
x = HypInversionMod (stateHyper, n, m, N,idx);
}
// undo symmetry transformations
return x * fak + addd;
}
__global__ void clearSamples(int samples[SAM_NUM_VALUES]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < (SAM_NUM_VALUES)){
samples[idx] = 0;
}
}
__device__ void methodA(myCurandState_t state[SAM_PENUMBER],int N, int n, int num_sample, int initialTocurrent,int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES]) {
//ASSERT_LEQ(n, N);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Initialization
int sample = 0;
double Nreal = (double) N;
double top = Nreal - n;
// Main loop
while (n >= 2) {
int S = 0;
double V = getnextrand(&state[idx]);
double quot = top / Nreal;
while (quot > V) {
S++;
top -= 1.0;
Nreal -= 1.0;
quot = (quot * top) / Nreal;
}
// Skip over next S records and select the following one
sample += S + 1;
//samples[idx][num_sample++] = sample + initialTocurrent;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1];
//callback(sample);
Nreal -= 1.0;
n--;
}
if (n == 1) {
int S = round(Nreal) * getnextrand(&state[idx]);
sample += S + 1;
//samples[idx][num_sample++] = sample + initialTocurrent;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1];
//callback(sample);
}
}
// Sampling method D from Vitter et al.
//
// \param state Per-PE curand states (source of uniform variates).
// \param N Size of this PE's population.
// \param n Number of samples.
// \param device_list Values to sample from.
// \param samples Output array that receives the selected values.
//
__device__ void sample(myCurandState_t state[SAM_PENUMBER], int N, int n, int device_list[SAM_NUM_VALUES], int samples[SAM_NUM_VALUES]) {
//ASSERT_LEQ(n, N);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int initialN = N;
// Initialization
int sample = 0;
int num_sample = 0;
double nreal = (double) n;
double ninv = 1.0 / nreal;
double Nreal = (double) N;
double Vprime = exp(log(getnextrand(&state[idx])) * ninv);
int qu1 = N + 1 - n;
double qu1real = Nreal + 1.0 - nreal;
int negalphainv = -13;
int threshold = n * (-negalphainv);
int S = 0;
// Main loop
while (n > 1 && threshold < N) {
double nmin1inv = 1.0 / (nreal - 1.0);
double negSreal = 0.0;
while (true) {
// Step D2: Generate U and X
double X;
while (true) {
X = Nreal * (1.0 - Vprime);
S = X;
if (S < qu1) break;
Vprime = exp(log(getnextrand(&state[idx])) * ninv);
}
double U = getnextrand(&state[idx]);
negSreal = -(double)S;
// Step D3: Accept?
double y1 = exp(log(U * Nreal / qu1real) * nmin1inv);
Vprime = y1 * (-X / Nreal + 1.0) * (qu1real / (negSreal + qu1real));
if (Vprime <= 1.0) break; // Accept!
// Step D4: Accept?
double y2 = 1.0; double top = Nreal - 1.0;
double bottom;
double limit;
if (n - 1 > S) {
bottom = Nreal - nreal;
limit = N - S;
} else {
bottom = negSreal + Nreal - 1.0;
limit = qu1;
}
for (int t = N; t > limit; t--) {
y2 = (y2 * top) / bottom;
top -= 1.0;
bottom -= 1.0;
}
if (Nreal / (Nreal - X) >= y1 * exp(log(y2) * nmin1inv)) {
// Accept!
Vprime = exp(log(getnextrand(&state[idx])) * nmin1inv);
break;
}
Vprime = exp(log(getnextrand(&state[idx])) * ninv);
}
// Skip over next S records and select the following one
sample += S + 1;
//samples[idx][num_sample++] = sample;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1];
//callback(sample);
N = (N - 1) - S;
Nreal = (Nreal - 1.0) + negSreal;
n--;
nreal -= 1.0;
ninv = nmin1inv;
qu1 -= S;
qu1real += negSreal;
threshold += negalphainv;
}
if (n > 1) {
int currentN = N;
methodA(state, N, n, num_sample, initialN - currentN, device_list,samples);
//samples[num_sample++] = sample + initialN - currentN;
//methodA(N, n, [&](int sample) {
// callback(sample + initialN - currentN);
//});
} else if (n == 1) {
S = N * Vprime;
// Skip over next S records and select the following one
sample += S + 1;
//samples[idx][num_sample++] = sample;
samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1];
//callback(sample);
}
}
__global__ void sampleP(myCurandState_t state[SAM_PENUMBER], myCurandState_t stateHyper[SAM_PENUMBER], int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES], int n, int j, int k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
//idx += 1;
if(idx < device_pe_inuse){
int seed = 1;
//int counter = 0;
int m,x;
while(j - k != 0) {
curand_init(seed, 0 , 0, &stateHyper[idx]);
m = floor( (j+k)/2.0 );
//printf("sampleP1 n %d idx %d m %d\n",n,idx,m);
//__device__ int Hypergeometric (curandState stateHyper[PENUMBER],
//int n, int m, int N, int idx) {
/*
This function generates a random variate with the hypergeometric
distribution. This is the distribution you get when drawing balls without
replacement from an urn with two colors. n is the number of balls you take,
m is the number of red balls in the urn, N is the total number of balls in
the urn, and the return value is the number of red balls you get. */
//printf("would call Hypergeometric(stateHyper, %d, %d, %d, %d)\n", n, (m-j)*PESIZE + 1, (k-j)*PESIZE + 1, idx);
//printf("j is now %d, k is %d, m is %d, sums are %d and %d\n", j, k, m, k - (j - 1), m - (j - 1));
if(k != device_pe_inuse - 1){
x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, (k-(j-1))*SAM_PESIZE, idx);
}
else{
x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, ((k-1)-(j-1))*SAM_PESIZE + device_num_inuse % SAM_PESIZE, idx);
}
//printf("sampleP2 n %d idx %d x %d\n",n,idx,x);
//int x = m;
if(idx <= m) {
n = x;
k = m;
seed = seed * 2;
} else {
n = n-x;
j = m + 1;
seed = seed * 2 + 1;
}
}
//printf("sample n %d \n",n);
if(idx != device_pe_inuse - 1 ) {
//printf("idx %d sampling %d values\n", idx, n);
sample(state, SAM_PESIZE, n, device_list, samples);
}
else {
//printf("n > PESIZE %d \n",n);
sample(state, device_num_inuse % SAM_PESIZE, n, device_list, samples);
}
/*if(n <= PESIZE ) {
//printf("idx %d sampling %d values\n", idx, n);
sample(state, PESIZE, n, device_list, samples);
}
else {
printf("n > PESIZE %d \n",n);
}*/
}
}
//__global__ void print_device_reduced_pe_position(){
//printf("reduced_pe_position %d \n",( int( 0.5 + ceil((float)device_reduced_pe_position / (PESIZE) )) ) );
//printf("device_reduced_pe_position %d \n",(device_reduced_pe_position ) );
//}
__global__ void initCurand(myCurandState_t state[][PESIZE]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
if(idx < PENUMBER && idy<PESIZE){
curand_init(idx*(PESIZE)+idy,0 , 0, &state[idx][idy]);
}
}
__global__ void compute(int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list, int iteration){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int sameTypeCount=0;
int current_id = idx*(SIZE+2)+idy;
if(grid[idx][idy] != 0){
int currentType = grid[idx][idy];
if(grid[idx-1][idy-1] == currentType){
sameTypeCount += 1;
}
if(grid[idx-1][idy] == currentType){
sameTypeCount += 1;
}
if(grid[idx-1][idy+1] == currentType){
sameTypeCount += 1;
}
if(grid[idx][idy-1] == currentType){
sameTypeCount += 1;
}
if(grid[idx][idy+1] == currentType){
sameTypeCount += 1;
}
if(grid[idx+1][idy-1] == currentType){
sameTypeCount += 1;
}
if(grid[idx+1][idy] == currentType){
sameTypeCount += 1;
}
if(grid[idx+1][idy+1] == currentType){
sameTypeCount += 1;
}
if(sameTypeCount < happinessThreshold){
move_list[current_id] = current_id;
space_list[current_id] = current_id;
}
}
else if(idx != 0 && idy !=0 && idx != (SIZE+1) && idy != (SIZE+1) ){
space_list[current_id] = current_id;
}
}
__global__ void update (int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
grid[idy][idx] = new_grid[idy][idx];
move_list[idx*(SIZE+2)+idy] = 0;
space_list[idx*(SIZE+2)+idy] = 0;
}
__global__ void sendToRandomPerpe(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < device_penumber_inuse -1 ){
for(int i=0; i < PESIZE; i++ ){
float r = getnextrand(&state[idx][0]);
int random_position = r * (device_penumber_inuse-1);
int acquired_position = atomicAdd(&random_list_counter[random_position],1);
temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i];
}
}
else if(idx == device_penumber_inuse - 1 ){
for(int i=0; i < device_removed_move_list_end % PESIZE; i++ ){
float r = getnextrand(&state[idx][0]);
int random_position = r * (device_penumber_inuse-1);
int acquired_position = atomicAdd(&random_list_counter[random_position],1);
temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i];
}
}
}
__global__ void sendToRandom(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx*PESIZE +idy < device_removed_move_list_end ){
float r = getnextrand(&state[idx][idy]);
int random_position = r * (device_penumber_inuse-1);
int acquired_position = atomicAdd(&random_list_counter[random_position],1);
temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+idy];
}
}
__global__ void clearCounter(int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < device_penumber_inuse){
random_list_counter[idx] = 0;
}
}
__global__ void generateList(int device_list[][PESIZE]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx*PESIZE +idy < device_removed_space_list_end ){
device_list[idx][idy] = idx*PESIZE +idy;
}
}
static __device__ void swap(int *data, int x, int y)
{
int temp = data[x];
data[x] = data[y];
data[y] = temp;
}
static __device__ int partition(int *data, int left, int right)
{
const int mid = left + (right - left) / 2;
const int pivot = data[(mid)];
swap(data, (mid), (left));
int i = left + 1;
int j = right;
while (i <= j) {
while (i <= j && data[(i)] <= pivot) {
i++;
}
while (i <= j && data[(j)] > pivot) {
j--;
}
if (i < j) {
swap(data, (i), (j));
}
}
swap(data, (i - 1), (left));
return i - 1;
}
typedef struct sort_data {
int left;
int right;
} sort_data;
__device__ void quicksort_seq(int *data, int right)
{
int left = 0;
if(left == right)
return;
if (left > right) {
right = 1 + right;
}
int stack_size = 0;
sort_data stack[PESIZE*FACTOR];
stack[stack_size++] = { left, right };
while (stack_size > 0) {
int curr_left = stack[stack_size - 1].left;
int curr_right = stack[stack_size - 1].right;
stack_size--;
if (curr_left < curr_right) {
int part = partition(data, curr_left, curr_right);
stack[stack_size++] = {curr_left, part - 1};
stack[stack_size++] = {part + 1, curr_right};
}
}
}
__global__ void sortList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < device_penumber_inuse){
int number = random_list_counter[idx];
if(number != 0){
quicksort_seq(temp_device_list[idx], number - 1 );
}
}
}
__global__ void randomPermute(myCurandState_t state[][PESIZE], int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int reduced_pe = device_penumber_inuse;
if(idx < reduced_pe){
for (int i = 0; i < random_list_counter[idx]; i++){
float r = getnextrand(&state[idx][0]);
int j = r * (random_list_counter[idx]-1);
int temp = temp_device_list[idx][i] ;
temp_device_list[idx][i] = temp_device_list[idx][j] ;
temp_device_list[idx][j] = temp;
}
}
}
__global__ void recoverSize(int device_list[][PESIZE], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER], int scanned_random_list_counter[PENUMBER]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int reduced_pe = device_penumber_inuse;
if(idx < reduced_pe){
int delta = scanned_random_list_counter[idx];
for(int i=0; i<random_list_counter[idx]; i++){
int addValue = delta + i;
int interResult = device_penumber_inuse*addValue/(PESIZE*device_penumber_inuse);
device_list[interResult][(delta- (PESIZE*device_penumber_inuse/device_penumber_inuse)*interResult + i)] = temp_device_list[idx][i];
}
}
}
struct smaller_than
{
__device__
bool operator()(const int x)
{
return (x < device_removed_space_list_end) == 0;
}
};
struct greater_than
{
__device__
bool operator()(int x)
{
return x > device_removed_move_list_end;
}
};
__global__ void printTempList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){
for(int i =0; i<device_penumber_inuse; i++){
for(int j=0; j<random_list_counter[i];j++){
printf("%d ",temp_device_list[i][j]);
}
printf("\n");
}
}
__global__ void printList(int * list,int *removed_list_end){
printf( "SIZE %d \n",removed_list_end - list) ;
for(int i=0; i<removed_list_end - list; i++){
printf("%d ",list[i]);
}
printf("\n");
}
__global__ void printListPre(int * list){
printf( "SIZE %d \n",device_removed_space_list_end) ;
for(int i=0; i<device_removed_space_list_end; i++){
printf("%d ",list[i]);
}
printf("\n");
}
__global__ void prepareNewGrid (int new_grid[][SIZE+2], int * move_list, int permutation[][PESIZE]){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx<device_removed_move_list_end){
int idxTox = idx / PESIZE;
int idxToy = idx % PESIZE;
int agent_position = permutation[idxTox][idxToy];
new_grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)] = 0;
}
}
__global__ void assign (int grid[][SIZE+2], int new_grid[][SIZE+2], int permutation[][PESIZE], int * move_list, int * space_list, int samples[SAM_NUM_VALUES]){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx < (device_removed_move_list_end) ){
int idxTox = idx / PESIZE;
int idxToy = idx % PESIZE;
int space_position = space_list[samples[idx]-1];
int agent_position = permutation[idxTox][idxToy];
new_grid[space_position/(SIZE+2)][space_position%(SIZE+2)] = grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)];
}
}
__global__ void checkNumberDevice(int new_grid[][SIZE+2]){
int agentTypeOne = 0;
int agentTypeTwo = 0;
for(int i=0; i<SIZE+2; i++){
for(int j=0; j<SIZE+2; j++){
if(new_grid[i][j] == 1){
agentTypeOne +=1;
}
else if(new_grid[i][j] == 2){
agentTypeTwo += 1;
}
}
}
printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo);
}
void checkNumber(int grid [SIZE+2][SIZE+2]){
int agentTypeOne = 0;
int agentTypeTwo = 0;
for(int i=0; i<SIZE+2; i++){
for(int j=0; j<SIZE+2; j++){
if(grid[i][j] == 1){
agentTypeOne +=1;
}
else if(grid[i][j] == 2){
agentTypeTwo += 1;
}
}
}
printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo);
}
__global__ void devicePrintOutput(int device_list[][PESIZE]){
for(int i =0; i<device_penumber_inuse; i++){
//for(int j=0; j<random_list_counter[i];j++){
// printf("%d \n",i);
for(int j=0; j<PESIZE;j++){
//printf("PE %d, index %d, value %d\n", i, j, device_list[i][j]);
printf("%d ",device_list[i][j]);
}
printf("\n");
}
}
__global__ void initSamValue(int device_list[SAM_NUM_VALUES]){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
device_list[idx] = idx+1;
}
__global__ void printSamples(int samples[SAM_NUM_VALUES]){
for(int i=0; i<(device_removed_move_list_end); i++){
printf("%d %d \n",i,samples[i]);
}
}
__global__ void printSamValue(int device_sam_list[SAM_NUM_VALUES]){
for(int i=0; i<(device_pe_inuse*SAM_PESIZE); i++){
printf("%d ",device_sam_list[i]);
}
}
int host_grid[SIZE+2][SIZE+2];
int main(int argc, char* argv[])
{
struct timespec start, stop;
double accum;
int (*device_grid)[SIZE + 2];
int (*device_newGrid)[SIZE + 2];
int (*device_permutation_list)[PESIZE];
int (*device_temp_permutation_list)[PESIZE*FACTOR];
int (*random_list_counter);
int (*scanned_random_list_counter);
int (*move_list);
int (*removed_move_list_end);
int (*space_list);
int (*removed_space_list_end);
int (*samples);
int (*device_sam_list);
srand(SRAND_VALUE);
size_t bytes = sizeof(int)*(SIZE + 2)*(SIZE + 2);
myCurandState_t (*devState)[PESIZE];
myCurandState_t (*devStateHyper);
myCurandState_t (*devStateSam);
cudaMalloc((void**)&devState, TOTAL * sizeof(myCurandState_t));
cudaMalloc(&random_list_counter, sizeof(int)*(PENUMBER));
cudaMalloc(&scanned_random_list_counter, sizeof(int)*(PENUMBER));
cudaMalloc(&device_sam_list, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER));
cudaMalloc((void**)&device_grid, bytes);
cudaMalloc((void**)&device_newGrid, bytes);
cudaMalloc((void**)&device_permutation_list, sizeof(int)*(TOTAL));
cudaMalloc((void**)&device_temp_permutation_list, sizeof(int)*(agentNumber)*FACTOR);
cudaMalloc(&move_list, sizeof(int)*(SIZE + 2)*(SIZE + 2));
cudaMalloc(&space_list, sizeof(int)*(SIZE + 2)*(SIZE + 2));
cudaMalloc(&samples, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER));
cudaMalloc(&devStateHyper, SAM_PENUMBER * sizeof(myCurandState_t));
cudaMalloc(&devStateSam, SAM_PENUMBER * sizeof(myCurandState_t));
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
int blockSizeVerPermu = numThreadsPerBlock / PESIZE;
dim3 blockSizePermu(blockSizeVerPermu, PESIZE, 1);
initCurand<<<(ceil(TOTAL/double(numThreadsPerBlock))),blockSizePermu>>>(devState);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
for (int i=0; i<(SIZE+2); i++){
for (int j=0; j<SIZE+2; j++){
host_grid[i][j] = 0;
}
}
int blockSizePerDim = sqrt(numThreadsPerBlock);
int gridSizePerDim = (SIZE + 2) / blockSizePerDim;
dim3 blockSize(blockSizePerDim, blockSizePerDim, 1);
dim3 gridSize(gridSizePerDim, gridSizePerDim, 1);
initPos(host_grid);
//printOutput(host_grid);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
cudaMemcpy(device_grid,host_grid,bytes,cudaMemcpyHostToDevice);
cudaMemcpy(device_newGrid,host_grid,bytes,cudaMemcpyHostToDevice);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
	initSamCurand<<<(int)ceil((double)SAM_PENUMBER / SAM_numThreadsPerBlock),SAM_numThreadsPerBlock>>>(devStateSam); // round up so every state is initialized even when SAM_PENUMBER is not a multiple of the block size
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list);
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
cached_allocator alloc;
int removed_list_number = 0;
int space_list_number = 0;
for(int i=0; i<ITERATIONS; i++){
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
compute << <gridSize, blockSize >> >(device_grid, device_newGrid, move_list, space_list, i);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
removed_move_list_end = thrust::remove(thrust::cuda::par(alloc), move_list, move_list + ((SIZE+2)*(SIZE+2)), 0);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
removed_list_number = removed_move_list_end - move_list;
cudaMemcpyToSymbol(device_removed_move_list_end, &removed_list_number, sizeof(int));
int TwoDimGridSize = ceil(removed_list_number/double(numThreadsPerBlock));
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
removed_space_list_end = thrust::remove(thrust::cuda::par(alloc), space_list, space_list + ((SIZE+2)*(SIZE+2)), 0);
space_list_number = removed_space_list_end - space_list;
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
cudaMemcpyToSymbol(device_removed_space_list_end, &space_list_number, sizeof(int));
int penumberinuse = ceil(removed_list_number/ double(PESIZE));
cudaMemcpyToSymbol(device_penumber_inuse, &penumberinuse, sizeof(int));
generateList<<<ceil(space_list_number/double(numThreadsPerBlock)),blockSizePermu>>>(device_permutation_list);
int sam_num_inuse = space_list_number;
int sam_pe_inuse = ceil(double(sam_num_inuse) / SAM_PESIZE);
cudaMemcpyToSymbol(device_pe_inuse, &sam_pe_inuse, sizeof(int));
cudaMemcpyToSymbol(device_num_inuse, &sam_num_inuse, sizeof(int));
clearSamples<<<ceil(sam_pe_inuse*SAM_PESIZE / (double)SAM_numThreadsPerBlock), SAM_numThreadsPerBlock>>>(samples);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
int sam_gridSize = ceil((double)sam_pe_inuse / SAM_numThreadsPerBlock);
initSamValue<<<ceil(double(sam_num_inuse) / SAM_numThreadsPerBlock), SAM_numThreadsPerBlock>>>(device_sam_list);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
sampleP<<<sam_gridSize, SAM_numThreadsPerBlock>>>( devStateSam, devStateHyper, device_sam_list, samples, removed_list_number, 0, sam_pe_inuse-1);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
int OneDimGridSize = ceil(penumberinuse / double(numThreadsPerBlock));
clearCounter<<<OneDimGridSize,(numThreadsPerBlock)>>>(random_list_counter);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
sendToRandom<<<ceil(removed_list_number/double(numThreadsPerBlock)),blockSizePermu >>>(devState,move_list,device_temp_permutation_list,random_list_counter);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
sortList<<<OneDimGridSize,(numThreadsPerBlock)>>>(device_temp_permutation_list,random_list_counter);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
thrust::exclusive_scan(thrust::cuda::par(alloc), random_list_counter, random_list_counter + penumberinuse, scanned_random_list_counter);
randomPermute<<<OneDimGridSize,(numThreadsPerBlock)>>>(devState,device_temp_permutation_list,random_list_counter);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
recoverSize<<<OneDimGridSize,(numThreadsPerBlock)>>>(device_permutation_list, device_temp_permutation_list,random_list_counter,scanned_random_list_counter);
thrust::remove(thrust::device, samples, samples + sam_pe_inuse*SAM_PESIZE , 0);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
prepareNewGrid <<<TwoDimGridSize, numThreadsPerBlock >>> (device_newGrid, move_list,device_permutation_list);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
assign <<<TwoDimGridSize, numThreadsPerBlock>>> (device_grid, device_newGrid, device_permutation_list, move_list, space_list,samples);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list);
#ifdef DEBUG
cudaDeviceSynchronize();
cudaCheckError();
#endif
}
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
accum = ( stop.tv_sec - start.tv_sec ) * 1e6
+ ( stop.tv_nsec - start.tv_nsec ) / 1e3;
printf( "%.1f Time is %.5f s \n",float(OCCUPANCY), accum / 1e6);
cudaMemcpy(host_grid, device_newGrid, bytes, cudaMemcpyDeviceToHost);
//printOutput(host_grid);
//checkNumber(host_grid);
cudaFree(device_grid);
cudaFree(device_newGrid);
cudaFree(device_permutation_list);
cudaFree(device_temp_permutation_list);
cudaFree(move_list);
cudaFree(random_list_counter);
cudaFree(scanned_random_list_counter);
cudaFree(space_list);
cudaFree(devState);
cudaFree(samples);
cudaFree(devStateSam);
cudaFree(devStateHyper);
cudaFree(device_sam_list);
return 0;
}
void printOutput(int grid [SIZE+2][SIZE+2] ){ //output grid cells 1 to SIZE (the border rows/columns are skipped)
for (int i=1; i<SIZE+1; i++){
for (int j=1; j<SIZE+1; j++){
printf("%d ",grid[i][j]);
//if(i%SIZE)
}
printf("\n");
}
printf("\n");
}
void initPos(int grid [SIZE+2][SIZE+2]){ // place type-1 and type-2 agents on randomly chosen empty cells
int row;
int column;
for(int i=0; i<agentTypeOneNumber; i++){
do{
row = random_location();
column = random_location();
}while(grid[row][column] != 0);
grid[row][column] = 1;
}
for(int i=0; i<agentTypeTwoNumber; i++){
do{
row = random_location();
column = random_location();
}while(grid[row][column] != 0);
grid[row][column] = 2;
}
}
int random_location() { // generate a random grid index in the range 1 to SIZE
int r;
r = rand();
return (r % (SIZE) +1 );
}
|
fc254af8ffd7f78a86fc8e4ee6d0f5091c75b7bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "conv.h"
#include "conv_common.h"
#include <iostream>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
std::cerr << "GPUassert: " << hipGetErrorString(code) << " " << file << " " << line << std::endl;
if (abort) exit(code);
}
}
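// Naive 2D convolution: one thread per output pixel; the accumulated mask weight re-normalizes the result at image borders where the mask is clipped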
__global__ void simple_conv(float* in_data, float* out_data, uint width, uint height, float* mask, uint mask_width, uint mask_height)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_x >= width || idx_y >= height)
return;
int mask_hw = mask_width/2;
int mask_hh = mask_height/2;
float sum = 0.0;
float mask_sum = 0.0;
for (uint x = 0 ; x < mask_width ; ++x)
{
for (uint y = 0 ; y < mask_height ; ++y)
{
int px = idx_x + (x - mask_hw);
int py = idx_y + (y - mask_hh);
if (px < 0 || px >= width || py < 0 || py >= height)
continue;
float m_value = mask[x + y * mask_width];
sum += m_value * in_data[px + py * width];
mask_sum += m_value;
}
}
out_data[idx_x + idx_y * width] = sum / mask_sum;
}
float* gpu_conv(float* image, uint width, uint height, float* mask, uint mask_width, uint mask_height, struct benchmark& bench)
{
uint size = width * height;
float* out_image = new float[size];
uint mask_size = mask_width * mask_height;
hipEvent_t total_start, transfer_start, compute_start, total_stop, transfer_stop, compute_stop;
hipEventCreate(&total_start); hipEventCreate(&transfer_start); hipEventCreate(&compute_start);
hipEventCreate(&total_stop); hipEventCreate(&transfer_stop); hipEventCreate(&compute_stop);
hipEventRecord(total_start);
float *d_in, *d_out, *d_mask;
gpuErrchk( hipMalloc((void**) &d_in, size * sizeof(float)) );
gpuErrchk( hipMalloc((void**) &d_out, size * sizeof(float)) );
gpuErrchk( hipMalloc((void**) &d_mask, mask_size * sizeof(float)) );
hipEventRecord(transfer_start);
gpuErrchk( hipMemcpy(d_in, image, size * sizeof(float), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_mask, mask, mask_size * sizeof(float), hipMemcpyHostToDevice) );
// Determining best threads per block & block number to run the kernel
struct hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
float dim = sqrt((float)maxThreadsPerBlock);
const dim3 blockDim(dim, dim);
const dim3 numBlocks(ceil(width / dim), ceil(height / dim)); // round up so edge pixels are covered (the kernel bounds-checks out-of-range threads)
hipEventRecord(compute_start);
hipLaunchKernelGGL(( simple_conv), dim3(numBlocks), dim3(blockDim), 0, 0, d_in, d_out, width, height, d_mask, mask_width, mask_height);
hipEventRecord(compute_stop);
gpuErrchk( hipMemcpy(out_image, d_out, size * sizeof(float), hipMemcpyDeviceToHost) );
hipEventRecord(transfer_stop);
hipEventRecord(total_stop);
hipEventSynchronize(total_stop);
float compute_time, transfer_time, total_time;
hipEventElapsedTime(&compute_time, compute_start, compute_stop);
hipEventElapsedTime(&transfer_time, transfer_start, transfer_stop);
hipEventElapsedTime(&total_time, total_start, total_stop);
bench.compute_time = compute_time;
bench.transfer_time = transfer_time;
bench.total_time = total_time;
gpuErrchk( hipFree(d_in) );
gpuErrchk( hipFree(d_out) );
gpuErrchk( hipFree(d_mask) );
return out_image;
}
|
fc254af8ffd7f78a86fc8e4ee6d0f5091c75b7bd.cu
|
#include "conv.h"
#include "conv_common.h"
#include <iostream>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl;
if (abort) exit(code);
}
}
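// Naive 2D convolution: one thread per output pixel; the accumulated mask weight re-normalizes the result at image borders where the mask is clipped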
__global__ void simple_conv(float* in_data, float* out_data, uint width, uint height, float* mask, uint mask_width, uint mask_height)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_x >= width || idx_y >= height)
return;
int mask_hw = mask_width/2;
int mask_hh = mask_height/2;
float sum = 0.0;
float mask_sum = 0.0;
for (uint x = 0 ; x < mask_width ; ++x)
{
for (uint y = 0 ; y < mask_height ; ++y)
{
int px = idx_x + (x - mask_hw);
int py = idx_y + (y - mask_hh);
if (px < 0 || px >= width || py < 0 || py >= height)
continue;
float m_value = mask[x + y * mask_width];
sum += m_value * in_data[px + py * width];
mask_sum += m_value;
}
}
out_data[idx_x + idx_y * width] = sum / mask_sum;
}
float* gpu_conv(float* image, uint width, uint height, float* mask, uint mask_width, uint mask_height, struct benchmark& bench)
{
uint size = width * height;
float* out_image = new float[size];
uint mask_size = mask_width * mask_height;
cudaEvent_t total_start, transfer_start, compute_start, total_stop, transfer_stop, compute_stop;
cudaEventCreate(&total_start); cudaEventCreate(&transfer_start); cudaEventCreate(&compute_start);
cudaEventCreate(&total_stop); cudaEventCreate(&transfer_stop); cudaEventCreate(&compute_stop);
cudaEventRecord(total_start);
float *d_in, *d_out, *d_mask;
gpuErrchk( cudaMalloc((void**) &d_in, size * sizeof(float)) );
gpuErrchk( cudaMalloc((void**) &d_out, size * sizeof(float)) );
gpuErrchk( cudaMalloc((void**) &d_mask, mask_size * sizeof(float)) );
cudaEventRecord(transfer_start);
gpuErrchk( cudaMemcpy(d_in, image, size * sizeof(float), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_mask, mask, mask_size * sizeof(float), cudaMemcpyHostToDevice) );
// Determining best threads per block & block number to run the kernel
struct cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
float dim = sqrt((float)maxThreadsPerBlock);
const dim3 blockDim(dim, dim);
const dim3 numBlocks(ceil(width / dim), ceil(height / dim)); // round up so edge pixels are covered (the kernel bounds-checks out-of-range threads)
cudaEventRecord(compute_start);
simple_conv<<<numBlocks, blockDim>>>(d_in, d_out, width, height, d_mask, mask_width, mask_height);
cudaEventRecord(compute_stop);
gpuErrchk( cudaMemcpy(out_image, d_out, size * sizeof(float), cudaMemcpyDeviceToHost) );
cudaEventRecord(transfer_stop);
cudaEventRecord(total_stop);
cudaEventSynchronize(total_stop);
float compute_time, transfer_time, total_time;
cudaEventElapsedTime(&compute_time, compute_start, compute_stop);
cudaEventElapsedTime(&transfer_time, transfer_start, transfer_stop);
cudaEventElapsedTime(&total_time, total_start, total_stop);
bench.compute_time = compute_time;
bench.transfer_time = transfer_time;
bench.total_time = total_time;
gpuErrchk( cudaFree(d_in) );
gpuErrchk( cudaFree(d_out) );
gpuErrchk( cudaFree(d_mask) );
return out_image;
}
|
ecb54fcd5de435fc7de23fbfbaac8df198ba0805.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
void main()
{
hipDeviceProp_t prop;
int count = 0;
hipGetDeviceCount( &count );
for( int index=0; index<count ;index++ )
{
hipGetDeviceProperties(&prop, index);
cout<<prop.name<<endl;
cout<<prop.minor<<" - "<<prop.major<<endl;
cout<<prop.clockRate<<endl;
}
cin.get();
}
|
ecb54fcd5de435fc7de23fbfbaac8df198ba0805.cu
|
#include "stdafx.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
void main()
{
cudaDeviceProp prop;
int count = 0;
cudaGetDeviceCount( &count );
for( int index=0; index<count ;index++ )
{
cudaGetDeviceProperties(&prop, index);
cout<<prop.name<<endl;
cout<<prop.minor<<" - "<<prop.major<<endl;
cout<<prop.clockRate<<endl;
}
cin.get();
}
|
9928142c719c72e0029c0cc7bfb14948980c1b02.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
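// 3x3 non-maximum suppression: keep a pixel only if it is the neighbourhood maximum and not tied with its up, up-left, up-right or left neighbour; border pixels are set to 0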
template<typename T>
__global__
void non_maxima_supression_cuda(T* image_in,T* image_out,int widthImage,int heightImage)
{
unsigned int x=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y=blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = x * widthImage + y;
if(y>0 && y<(widthImage-1) && x>0 && x<(heightImage-1) ){
T curr=image_in[index];
T curr_up=image_in[(x-1)*widthImage+(y)];
T curr_down=image_in[(x+1)*widthImage+(y)];
T curr_up_left=image_in[(x-1)*widthImage+(y-1)];
T curr_up_right=image_in[(x-1)*widthImage+(y+1)];
T curr_down_left=image_in[(x+1)*widthImage+(y-1)];
T curr_down_right=image_in[(x+1)*widthImage+(y+1)];
T curr_left=image_in[(x)*widthImage+(y-1)];
T curr_right=image_in[(x)*widthImage+(y+1)];
T max_element=curr;
if(curr_up>max_element)max_element=curr_up;
if(curr_down>max_element)max_element=curr_down;
if(curr_down_left>max_element)max_element=curr_down_left;
if(curr_down_right>max_element)max_element=curr_down_right;
if(curr_up_left>max_element)max_element=curr_up_left;
if(curr_up_right>max_element)max_element=curr_up_right;
if(curr_left>max_element)max_element=curr_left;
if(curr_right>max_element)max_element=curr_right;
if ( max_element != curr
||max_element==curr_up||max_element==curr_up_left||max_element==curr_up_right||max_element==curr_left )
image_out[ index ] = 0;
else
image_out[ index ] = curr;
}
else
{
image_out[index]=0;
}
}
template <typename T>
void calculate_non_maxima_cuda(T* image_in, T* image_out, int heightImage, int widthImage, int threadsX, int threadsY,hipStream_t stream)
{
dim3 block(threadsX, threadsY, 1);
dim3 grid(heightImage / block.x, widthImage / block.y, 1);
hipLaunchKernelGGL(( non_maxima_supression_cuda), dim3(grid),dim3(block),0,stream, image_in, image_out, widthImage, heightImage);
}
|
9928142c719c72e0029c0cc7bfb14948980c1b02.cu
|
#include <stdio.h>
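// 3x3 non-maximum suppression: keep a pixel only if it is the neighbourhood maximum and not tied with its up, up-left, up-right or left neighbour; border pixels are set to 0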
template<typename T>
__global__
void non_maxima_supression_cuda(T* image_in,T* image_out,int widthImage,int heightImage)
{
unsigned int x=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y=blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = x * widthImage + y;
if(y>0 && y<(widthImage-1) && x>0 && x<(heightImage-1) ){
T curr=image_in[index];
T curr_up=image_in[(x-1)*widthImage+(y)];
T curr_down=image_in[(x+1)*widthImage+(y)];
T curr_up_left=image_in[(x-1)*widthImage+(y-1)];
T curr_up_right=image_in[(x-1)*widthImage+(y+1)];
T curr_down_left=image_in[(x+1)*widthImage+(y-1)];
T curr_down_right=image_in[(x+1)*widthImage+(y+1)];
T curr_left=image_in[(x)*widthImage+(y-1)];
T curr_right=image_in[(x)*widthImage+(y+1)];
T max_element=curr;
if(curr_up>max_element)max_element=curr_up;
if(curr_down>max_element)max_element=curr_down;
if(curr_down_left>max_element)max_element=curr_down_left;
if(curr_down_right>max_element)max_element=curr_down_right;
if(curr_up_left>max_element)max_element=curr_up_left;
if(curr_up_right>max_element)max_element=curr_up_right;
if(curr_left>max_element)max_element=curr_left;
if(curr_right>max_element)max_element=curr_right;
if ( max_element != curr
||max_element==curr_up||max_element==curr_up_left||max_element==curr_up_right||max_element==curr_left )
image_out[ index ] = 0;
else
image_out[ index ] = curr;
}
else
{
image_out[index]=0;
}
}
template <typename T>
void calculate_non_maxima_cuda(T* image_in, T* image_out, int heightImage, int widthImage, int threadsX, int threadsY,cudaStream_t stream)
{
dim3 block(threadsX, threadsY, 1);
dim3 grid(heightImage / block.x, widthImage / block.y, 1);
non_maxima_supression_cuda<<<grid,block,0,stream>>>(image_in, image_out, widthImage, heightImage);
}
|
fe5a31b65a2a7e9ea4aff653fad21ae4d139dc06.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
const Dtype* scale, const int scale_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
const Dtype* scale, const Dtype* bias,
const int scale_dim, const int inner_dim, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
/* if(this->layer_param_.phase()==PREDICT){ LOG(INFO)<<"start forward : "<< this->layer_param_.name();
Forward_cpu(bottom,top);
return;
} */
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
const Dtype* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ScaleBiasForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
top_data);
} else {
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ScaleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if(this->layer_param_.phase()==PREDICT_CPU){ LOG(INFO)<<"start forward : "<< this->layer_param_.name();
Forward_cpu(bottom,top);
return;
}
if (bias_layer_ &&
this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
}
const bool scale_param = (bottom.size() == 1);
Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
if ((!scale_param && propagate_down[1]) ||
(scale_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
const bool in_place = (bottom[0] == top[0]);
const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
// Hack: store big eltwise product in bottom[0] diff, except in the special
// case where this layer itself does the eltwise product, in which case we
// can store it directly in the scale diff, and we're done.
// If we're computing in-place (and not doing eltwise computation), this
// hack doesn't work and we store the product in temp_.
const bool is_eltwise = (bottom[0]->count() == scale->count());
Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
(in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
if (!is_eltwise) {
Dtype* sum_result = NULL;
if (inner_dim_ == 1) {
sum_result = product;
} else if (sum_result_.count() == 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
}
} else {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
sum_result = (outer_dim_ == 1) ?
scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
Dtype(1), product, sum_mult, Dtype(0), sum_result);
}
if (outer_dim_ != 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
if (scale_dim_ == 1) {
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
}
} else {
Dtype* scale_diff = scale->mutable_gpu_diff();
caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
Dtype(1), sum_result, sum_mult, Dtype(scale_param),
scale_diff);
}
}
}
}
if (propagate_down[0]) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* scale_data = scale->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ScaleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe
|
fe5a31b65a2a7e9ea4aff653fad21ae4d139dc06.cu
|
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
const Dtype* scale, const int scale_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
const Dtype* scale, const Dtype* bias,
const int scale_dim, const int inner_dim, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
/* if(this->layer_param_.phase()==PREDICT){ LOG(INFO)<<"start forward : "<< this->layer_param_.name();
Forward_cpu(bottom,top);
return;
} */
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
const Dtype* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
ScaleBiasForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
top_data);
} else {
ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if(this->layer_param_.phase()==PREDICT_CPU){ LOG(INFO)<<"start forward : "<< this->layer_param_.name();
Forward_cpu(bottom,top);
return;
}
if (bias_layer_ &&
this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
}
const bool scale_param = (bottom.size() == 1);
Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
if ((!scale_param && propagate_down[1]) ||
(scale_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
const bool in_place = (bottom[0] == top[0]);
const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
// Hack: store big eltwise product in bottom[0] diff, except in the special
// case where this layer itself does the eltwise product, in which case we
// can store it directly in the scale diff, and we're done.
// If we're computing in-place (and not doing eltwise computation), this
// hack doesn't work and we store the product in temp_.
const bool is_eltwise = (bottom[0]->count() == scale->count());
Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
(in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
if (!is_eltwise) {
Dtype* sum_result = NULL;
if (inner_dim_ == 1) {
sum_result = product;
} else if (sum_result_.count() == 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
}
} else {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
sum_result = (outer_dim_ == 1) ?
scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
Dtype(1), product, sum_mult, Dtype(0), sum_result);
}
if (outer_dim_ != 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
if (scale_dim_ == 1) {
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
}
} else {
Dtype* scale_diff = scale->mutable_gpu_diff();
caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
Dtype(1), sum_result, sum_mult, Dtype(scale_param),
scale_diff);
}
}
}
}
if (propagate_down[0]) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* scale_data = scale->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe
|
0492a1a979f198a632c6bcd5876421c505159026.hip
|
// !!! This is a file automatically generated by hipify!!!
// matrixMul_kernel.cu
//
// Matrix multiplication: C = A * B.
//
//
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "CH\chTimer.h"
#include <rocblas.h>
#define block_size 32
/*
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
*/
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B (No shared memory used)
//! hA is A's height, wA is A's width and wB is B's width
//! In other words, it computes hAxwA matrix multiplied by wAxWB matrix.
//! Assume matrices are stored in row-major linear array, and matrix indexing is 0-based.
////////////////////////////////////////////////////////////////////////////////
extern "C" __global__ void
matrixMul_v1( float* C, float* A, float* B, int hA, int wA, int wB)
{
// index of the C matrix element computed by this thread
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Cvalue is used to store the element of the C matrix computed by this thread
float Cvalue;
if (row < hA && col < wB)
{
Cvalue = 0.0f;
// Loop over all the matrices of A and B required to compute C
for (int k = 0; k < wA; k++)
Cvalue += A[row * wA + k] * B[k * wB + col];
// Write the block sub-matrix to device memory; each thread writes one element
C[row * wB + col] = Cvalue;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B (shared memory used)
//! hA is A's height, wA is A's width and wB is B's width
//! In other words, it computes hAxwA matrix multiplied by wAxWB matrix.
//! Assume matrices are stored in row-major linear array, and matrix indexing is 0-based.
////////////////////////////////////////////////////////////////////////////////
extern "C" __global__ void
matrixMul_v2( float* C, float* A, float* B, int hA, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * block_size * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = block_size;
// Index of the first sub-matrix of B processed by the block
int bBegin = block_size * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = block_size * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[block_size][block_size];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[block_size][block_size];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < block_size; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * block_size * by + block_size * bx;
C[c + wB * ty + tx] = Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/*
template <int block_size> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * block_size * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = block_size;
// Index of the first sub-matrix of B processed by the block
int bBegin = block_size * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = block_size * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[block_size][block_size];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[block_size][block_size];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < block_size; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * block_size * by + block_size * bx;
C[c + wB * ty + tx] = Csub;
}
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
*/
//
// Our CUDA matrix multiplication interface function
//
int GPU_SGEMM (float* C, float* A, float* B, int HA, int WA, int WB, int version)
{
float *d_A = 0;
float *d_B = 0;
float *d_C = 0;
// float alpha = 1.0f;
// float beta = 0.0f;
hipError_t cudaStatus;
// Make sure CUDA device 0 is available
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return 1;
}
/* Allocate device memory for the matrices (d_A, d_B, and d_C) */
if (hipMalloc((void **)&d_A, HA * WA * sizeof(d_A[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_B, WA * WB * sizeof(d_B[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_C, HA * WB * sizeof(d_C[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
return EXIT_FAILURE;
}
// Copy host memory (A, and B) to device
cudaStatus = hipMemcpy(d_A, A, HA*WA*sizeof(A[0]), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
printf("hipMemcpy (d_A, A) returned error code %d\n", cudaStatus);
exit(EXIT_FAILURE);
}
cudaStatus = hipMemcpy(d_B, B, WA*WB*sizeof(B[0]), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
printf("hipMemcpy (d_B, B) returned error code %d\n", cudaStatus);
exit(EXIT_FAILURE);
}
// Setup execution parameters and call kernel
dim3 block(block_size, block_size);
dim3 grid ((WB+block_size-1)/block_size, (HA+ block_size-1)/block_size);
if (version == 1)
hipLaunchKernelGGL(( matrixMul_v1), dim3(grid), dim3(block) , 0, 0, d_C, d_A, d_B, HA, WA, WB);
else
hipLaunchKernelGGL(( matrixMul_v2), dim3(grid), dim3(block) , 0, 0, d_C, d_A, d_B, HA, WA, WB);
hipDeviceSynchronize();
// Copy result (C) from device to host
cudaStatus = hipMemcpy(C, d_C, HA*WB*sizeof(C[0]), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
printf("hipMemcpy (C, d_C) returned error code\n", cudaStatus);
exit(EXIT_FAILURE);
}
/* Device memory clean up */
if (hipFree(d_A) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (hipFree(d_B) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (hipFree(d_C) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
return 0;
}
|
0492a1a979f198a632c6bcd5876421c505159026.cu
|
// matrixMul_kernel.cu
//
// Matrix multiplication: C = A * B.
//
//
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "CH\chTimer.h"
#include <cublas_v2.h>
#define block_size 32
/*
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
*/
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B (No shared memory used)
//! hA is A's height, wA is A's width and wB is B's width
//! In other words, it computes hAxwA matrix multiplied by wAxWB matrix.
//! Assume matrices are stored in row-major linear array, and matrix indexing is 0-based.
////////////////////////////////////////////////////////////////////////////////
extern "C" __global__ void
matrixMul_v1( float* C, float* A, float* B, int hA, int wA, int wB)
{
// index of the C matrix element computed by this thread
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Cvalue is used to store the element of the C matrix computed by this thread
float Cvalue;
if (row < hA && col < wB)
{
Cvalue = 0.0f;
// Loop over all the matrices of A and B required to compute C
for (int k = 0; k < wA; k++)
Cvalue += A[row * wA + k] * B[k * wB + col];
// Write the block sub-matrix to device memory; each thread writes one element
C[row * wB + col] = Cvalue;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B (shared memory used)
//! hA is A's height, wA is A's width and wB is B's width
//! In other words, it computes hAxwA matrix multiplied by wAxWB matrix.
//! Assume matrices are stored in row-major linear array, and matrix indexing is 0-based.
////////////////////////////////////////////////////////////////////////////////
extern "C" __global__ void
matrixMul_v2( float* C, float* A, float* B, int hA, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * block_size * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = block_size;
// Index of the first sub-matrix of B processed by the block
int bBegin = block_size * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = block_size * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[block_size][block_size];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[block_size][block_size];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < block_size; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * block_size * by + block_size * bx;
C[c + wB * ty + tx] = Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/*
template <int block_size> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * block_size * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = block_size;
// Index of the first sub-matrix of B processed by the block
int bBegin = block_size * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = block_size * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[block_size][block_size];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[block_size][block_size];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < block_size; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * block_size * by + block_size * bx;
C[c + wB * ty + tx] = Csub;
}
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
cudaDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
*/
//
// Our CUDA matrix multiplication interface function
//
int GPU_SGEMM (float* C, float* A, float* B, int HA, int WA, int WB, int version)
{
float *d_A = 0;
float *d_B = 0;
float *d_C = 0;
// float alpha = 1.0f;
// float beta = 0.0f;
cudaError_t cudaStatus;
// Make sure CUDA device 0 is available
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return 1;
}
/* Allocate device memory for the matrices (d_A, d_B, and d_C) */
if (cudaMalloc((void **)&d_A, HA * WA * sizeof(d_A[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_B, WA * WB * sizeof(d_B[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_C, HA * WB * sizeof(d_C[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
return EXIT_FAILURE;
}
// Copy host memory (A, and B) to device
cudaStatus = cudaMemcpy(d_A, A, HA*WA*sizeof(A[0]), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
printf("cudaMemcpy (d_A, A) returned error code %d\n", cudaStatus);
exit(EXIT_FAILURE);
}
cudaStatus = cudaMemcpy(d_B, B, WA*WB*sizeof(B[0]), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
printf("cudaMemcpy (d_B, B) returned error code %d\n", cudaStatus);
exit(EXIT_FAILURE);
}
// Setup execution parameters and call kernel
dim3 block(block_size, block_size);
dim3 grid ((WB+block_size-1)/block_size, (HA+ block_size-1)/block_size);
if (version == 1)
matrixMul_v1<<< grid, block >>>(d_C, d_A, d_B, HA, WA, WB);
else
matrixMul_v2<<< grid, block >>>(d_C, d_A, d_B, HA, WA, WB);
cudaDeviceSynchronize();
// Copy result (C) from device to host
cudaStatus = cudaMemcpy(C, d_C, HA*WB*sizeof(C[0]), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
printf("cudaMemcpy (C, d_C) returned error code\n", cudaStatus);
exit(EXIT_FAILURE);
}
/* Device memory clean up */
if (cudaFree(d_A) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_B) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_C) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
return 0;
}
|
9e3e99e819979c03c22c5afb7d2a33633038863f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "core.h"
#include "libwrap.h"
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <string.h>
#include <errno.h>
DebugLevel ncclDebugLevel;
extern "C" DSOGLOBAL
ncclResult_t ncclGetUniqueId(ncclUniqueId* out) {
pid_t pid = getpid();
static int count = 0;
int commId = __sync_fetch_and_add(&count, 1);
int len = snprintf(out->internal, NCCL_UNIQUE_ID_BYTES, "nccl-%d-%d", pid, commId);
if(strlen(out->internal) < len) {
WARN("ncclUniqueId truncated");
return ncclInternalError;
}
return ncclSuccess;
}
static ncclResult_t shmOpen(const char* shmname, size_t bytes, void** ptr) {
int fd = shm_open(shmname, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
if (fd == -1) {
WARN("shm_open failed to open %s", shmname);
return ncclSystemError;
}
if (ftruncate(fd, bytes) == -1) {
WARN("ftruncate failed to allocate %ld bytes", bytes);
shm_unlink(shmname);
close(fd);
return ncclSystemError;
}
*ptr = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (*ptr == MAP_FAILED) {
WARN("failure in mmap");
shm_unlink(shmname);
close(fd);
return ncclSystemError;
}
close(fd);
return ncclSuccess;
}
static ncclResult_t shmUnlink(const char* shmname) {
if(shm_unlink(shmname) == -1) {
WARN("smh_unlink failed");
return ncclSystemError;
} else {
return ncclSuccess;
}
}
static ncclResult_t shmUnmap(void* ptr, size_t bytes) {
if(munmap(ptr, bytes) == -1) {
WARN("munmap failed");
return ncclSystemError;
} else {
return ncclSuccess;
}
}
typedef struct {
int rank;
int ndev;
int cudaDev;
int ncclId;
pid_t pid;
ncclMem* hostptr;
ncclMem* devptr;
hipIpcMemHandle_t devipc;
size_t buffSize;
} RankEntry;
static int compRanks(const void* a, const void* b) {
const RankEntry* A = (const RankEntry*)a;
const RankEntry* B = (const RankEntry*)b;
if (A->ncclId < B->ncclId) return -1;
if (A->ncclId > B->ncclId) return 1;
return 0;
}
static void orderRanks(RankEntry* ranks, int count) {
qsort(ranks, count, sizeof(RankEntry), compRanks);
for(int i=0; i<count; ++i)
ranks[i].ncclId = i;
}
typedef struct {
union {
struct {
volatile int bar;
int ringDirectFail;
};
char pad[16];
};
RankEntry ranks[1];
} RankGather;
static ncclResult_t initGather(RankGather** gather, ncclUniqueId commId,
int ndev, int rank, RankEntry myInfo) {
size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry);
RankGather* tmp = NULL;
int bar_tmp;
ncclResult_t res = shmOpen(commId.internal, bytes, (void**)&tmp);
if (res != ncclSuccess) {
WARN("rank %d failed to open shm segment for gather", rank);
return res;
}
tmp->ranks[rank] = myInfo;
bar_tmp = tmp->bar - 1;
bool swapped;
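// Lock-free barrier: CAS-increment the shared counter; the last rank to arrive unlinks the shm segment and sorts the rank table by ncclId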
do {
bar_tmp += 1;
if (bar_tmp == ndev-1) { // everyone is done
ncclResult_t res = shmUnlink(commId.internal);
if (res != ncclSuccess) {
WARN("rank %d failed to unlink shm segment for gather", rank);
shmUnmap(tmp, bytes);
return res;
}
orderRanks(tmp->ranks, ndev);
}
swapped = __sync_bool_compare_and_swap(&tmp->bar, bar_tmp, bar_tmp+1);
} while(!swapped);
while (tmp->bar < ndev)
sched_yield();
__sync_synchronize();
*gather = tmp;
return ncclSuccess;
}
static void syncRingDirect(RankGather* gather, int* ringDirectOk) {
int bar_tmp = gather->bar - 1;
int ndev = gather->ranks[0].ndev;
bool swapped;
do {
bar_tmp += 1;
swapped = __sync_bool_compare_and_swap(&gather->bar, bar_tmp, bar_tmp+1);
} while(!swapped);
while (gather->bar < 2*ndev) // Wait for all ranks to arrive at this second barrier
sched_yield();
__sync_synchronize();
*ringDirectOk = gather->ringDirectFail ? 0 : 1;
}
static ncclResult_t closeGather(RankGather* gather, int ndev) {
int bar_tmp = gather->bar - 1;
bool swapped;
do {
bar_tmp += 1;
swapped = __sync_bool_compare_and_swap(&gather->bar, bar_tmp, bar_tmp+1);
} while(!swapped);
while (gather->bar < 3*ndev) // Wait for all ranks to arrive at this third barrier
sched_yield();
__sync_synchronize();
size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry);
ncclResult_t res = shmUnmap(gather, bytes);
if (res != ncclSuccess) {
WARN("failed to unmap %ld bytes of gather", bytes);
return res;
}
return ncclSuccess;
}
static ncclResult_t allocDevMem(ncclMem** ptr, size_t buffSize) {
size_t size = offsetof(struct ncclMem, buff) + buffSize;
hipError_t res = hipMalloc((void**)ptr, size);
if (res != hipSuccess) {
*ptr = NULL;
WARN("failed to allocate %lu byte device buffer", size);
return ncclCudaMallocFailed;
}
if (hipMemset(*ptr, 0, size) != hipSuccess) {
WARN("failed to memset device buffer.");
hipFree(*ptr);
*ptr = NULL;
return ncclUnhandledCudaError;
}
return ncclSuccess;
}
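// hostMemState flags: ShmMapped means hostMem comes from a registered shm
// mapping (needs hipHostUnregister + munmap); ShmLinked means the shm name is
// still linked (needs shm_unlink).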
static const int ShmMapped = 1;
static const int ShmLinked = 2;
static ncclResult_t allocHostMem(ncclMem** ptr, size_t buffSize) {
size_t size = offsetof(struct ncclMem, buff) + buffSize;
hipError_t res = hipHostMalloc((void**)ptr, size);
if (res != hipSuccess) {
*ptr = NULL;
WARN("failed to allocate %lu byte host buffer", size);
return ncclSystemError;
}
memset(*ptr, 0, size);
return ncclSuccess;
}
static ncclResult_t openHostMemShm(const char* shmname, ncclMem** ptr, size_t buffSize) {
size_t size = offsetof(struct ncclMem, buff) + buffSize;
ncclResult_t res = shmOpen(shmname, size, (void**)ptr);
if (res != ncclSuccess) {
WARN("failed to allocate %lu byte shm buffer", size);
*ptr = NULL;
return res;
}
if(hipHostRegister(*ptr, size, hipHostRegisterMapped) != hipSuccess) {
WARN("failed to register host buffer");
shmUnlink(shmname);
shmUnmap(*ptr, size);
*ptr = NULL;
return ncclUnhandledCudaError;
}
return ncclSuccess;
}
static ncclResult_t populateRankInfo(RankEntry* info, int rank, ncclComm_t comm) {
char busId[13];
uint32_t nvmlHandle;
hipError_t res = hipDeviceGetPCIBusId(busId, 13, comm->cudaDev);
if (res == hipErrorInvalidDevice) {
WARN("rank %d attempted to access an invalid cuda device %d", rank, comm->cudaDev);
return ncclInvalidDeviceIndex;
} else if (res != hipSuccess) {
WARN("rank %d failed to get PCI Bus Id for device %d", rank, comm->cudaDev);
return ncclUnhandledCudaError;
}
INFO("rank %d using device %d (%s)", rank, comm->cudaDev, busId);
if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) {
WARN("rank %d failed to get nvml handle for device %s", rank, busId);
return ncclUnhandledCudaError;
}
// Order by nvml index
if (wrapNvmlDeviceGetIndex(nvmlHandle, (unsigned*)&info->ncclId) != ncclSuccess) {
WARN("rank %d failed to get nvml device index for device %d", rank, comm->cudaDev);
return ncclUnhandledCudaError;
}
info->rank = rank;
info->ndev = comm->nDev;
info->cudaDev = comm->cudaDev;
info->pid = getpid();
info->buffSize = comm->buffSize;
info->hostptr = comm->hostMem;
info->devptr = comm->devMem;
if (hipIpcGetMemHandle(&info->devipc, (void*)comm->devMem) != hipSuccess) {
WARN("rank %d failed to open CUDA IPC handle", rank);
return ncclUnhandledCudaError;
}
return ncclSuccess;
}
static const int CLEANUP_NONE = 0;
static const int CLEANUP_CUIPC = 1;
static const int CLEANUP_UNMAP = 2;
static ncclResult_t commClearMaps(ncclComm_t comm) {
ncclResult_t res, retval = ncclSuccess;
hipError_t cures;
for(int d=0; d<comm->nDev; ++d) {
switch(comm->ptrs[d].remoteCleanup) {
case CLEANUP_NONE:
break;
case CLEANUP_CUIPC:
cures = hipIpcCloseMemHandle((void*)comm->ptrs[d].cleanupHandle);
if (cures != hipSuccess) {
WARN("rank %d failed to close IPC handle to rank %d",
comm->userFromRing[comm->ncclId], comm->userFromRing[d]);
retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval;
}
break;
case CLEANUP_UNMAP:
cures = hipHostUnregister(comm->ptrs[d].cleanupHandle);
if (cures != hipSuccess) {
WARN("rank %d failed to unregister handle to rank %d",
comm->userFromRing[comm->ncclId], comm->userFromRing[d]);
retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval;
}
res = shmUnmap(comm->ptrs[d].cleanupHandle, offsetof(ncclMem, buff) + comm->buffSize);
if (res != ncclSuccess) {
WARN("rank %d failed to unmap handle to rank %d",
comm->userFromRing[comm->ncclId], comm->userFromRing[d]);
retval = (retval == ncclSuccess) ? res : retval;
}
break;
default:
WARN("Unknown cleanup type %d", comm->ptrs[d].remoteCleanup);
}
comm->ptrs[d].remoteCleanup = CLEANUP_NONE;
comm->ptrs[d].cleanupHandle = NULL;
}
if (comm->userFromRing != NULL)
memset(comm->userFromRing, 0, sizeof(int)*comm->nDev);
if (comm->ringFromUser != NULL)
memset(comm->ringFromUser, 0, sizeof(int)*comm->nDev);
if (comm->devUserFromRing != NULL) {
hipError_t err = hipMemset(comm->devUserFromRing, 0, sizeof(int)*comm->nDev);
if (err != hipSuccess) {
WARN("Faild to clear dev map: %s", hipGetErrorString(err));
retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval;
}
}
return retval;
}
static ncclResult_t commBuildMaps(ncclComm_t comm, ncclUniqueId* commId, int rank, RankEntry* ranks, int* ringDirectFailed) {
int ndev = comm->nDev;
for(int i=0; i<ndev; ++i) {
// Check for inconsistencies between ranks
// If two ranks use the same rank, then one slot of
// ranks[] will be left unset with zero ndev/buffSize.
if (ranks[i].buffSize != comm->buffSize
|| ranks[i].ndev != comm->nDev) {
commClearMaps(comm);
return ncclRankMismatch;
}
// Create rank<->nccl maps
int iRank = ranks[i].rank;
comm->userFromRing[i] = iRank;
comm->ringFromUser[iRank] = i;
}
if (hipMemcpy(comm->devUserFromRing, comm->userFromRing, ndev*sizeof(int),
hipMemcpyHostToDevice) != hipSuccess) {
WARN("rank %d failed to copy maps to device", rank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
int myId = -1;
for (int i=0; i<ndev; ++i) {
if(ranks[i].rank == rank) {
myId = i;
break;
}
}
if (myId == -1) {
WARN("rank %d not found in communicator", rank);
return ncclInvalidRank;
}
comm->ncclId = myId;
int myDev = ranks[myId].cudaDev;
pid_t myPid = ranks[myId].pid;
comm->useRemoteRecv = 1; // Assume we directly write to result ptrs.
// The order that we link with peers must ensure that
// P2P slots are used for high-priority links first.
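  // j walks the ring starting at this rank's predecessor (j=0), then the rank
  // itself and its successor, so the nearest ring links are set up first.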
for (int j=0; j<ndev; ++j) {
int i = (myId - 1 + ndev + j) % ndev;
int iRank = ranks[i].rank;
int iDev = ranks[i].cudaDev;
pid_t iPid = ranks[i].pid;
int canpeer = 0;
if (hipDeviceCanAccessPeer(&canpeer, myDev, iDev) != hipSuccess) {
INFO("peer query failed between rank %d (dev %d) and rank %d (dev %d)",
rank, myDev, iRank, iDev);
canpeer = 0;
}
if (iPid == myPid) {
if (myDev == iDev) {
INFO("rank access %d -> %d via common device", rank, iRank);
comm->ptrs[i].local = ranks[myId].devptr;
comm->ptrs[i].remote = ranks[i].devptr;
comm->ptrs[i].remoteCleanup = CLEANUP_NONE;
} else {
int peer_enabled = canpeer;
if (canpeer) {
hipError_t p2pErr = hipDeviceEnablePeerAccess(iDev, 0);
if (p2pErr == hipErrorPeerAccessAlreadyEnabled) {
hipGetLastError();
} else if (p2pErr != hipSuccess) {
INFO("peer access failed between rank %d (dev %d) and rank %d (dev %d)\n",
rank, myDev, iRank, iDev);
peer_enabled = 0;
}
}
if (peer_enabled) {
INFO("rank access %d -> %d via P2P device mem", rank, iRank);
comm->ptrs[i].local = ranks[myId].devptr;
comm->ptrs[i].remote = ranks[i].devptr;
comm->ptrs[i].remoteCleanup = CLEANUP_NONE;
} else { // go through hostmem
INFO("rank access %d -> %d via zero-copy host mem", rank, iRank);
if (j <= 2)
*ringDirectFailed = 1;
if (hipHostGetDevicePointer(&comm->ptrs[i].local, ranks[myId].hostptr, 0) != hipSuccess) {
WARN("rank %d failed to map zero copy buffer to device", rank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
if (hipHostGetDevicePointer(&comm->ptrs[i].remote, ranks[i].hostptr, 0) != hipSuccess) {
WARN("rank %d failed to map %d's zero copy buffer to device", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
comm->ptrs[i].remoteCleanup = CLEANUP_NONE;
}
}
} else { // multi-process!
*ringDirectFailed = 1;
if (canpeer || myDev == iDev) {
INFO("rank access %d -> %d via Ipc P2P device mem", rank, iRank);
comm->ptrs[i].local = ranks[myId].devptr;
if (hipIpcOpenMemHandle((void**)(&comm->ptrs[i].remote),
ranks[i].devipc, hipIpcMemLazyEnablePeerAccess) != hipSuccess) {
WARN("rank %d failed to open Ipc handle to rank %d", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
comm->ptrs[i].remoteCleanup = CLEANUP_CUIPC;
comm->ptrs[i].cleanupHandle = comm->ptrs[i].remote;
} else { // go through hostmem
INFO("rank access %d -> %d via zero copy host shm", rank, iRank);
if (hipHostGetDevicePointer(&comm->ptrs[i].local, ranks[myId].hostptr, 0) != hipSuccess) {
WARN("rank %d failed to obtain dev ptr to sysmem buffer", rank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
char rankname[1024];
sprintf(rankname, "%s-%d", commId->internal, ranks[i].rank);
if (openHostMemShm(rankname, (ncclMem**)&comm->ptrs[i].cleanupHandle, ranks[i].buffSize)
!= ncclSuccess) {
WARN("rank %d failed to open sysmem buffer of rank %d", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
if (hipHostGetDevicePointer(&comm->ptrs[i].remote, comm->ptrs[i].cleanupHandle, 0) != hipSuccess) {
WARN("rank %d failed to obtain dev ptr for rank %d", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
comm->ptrs[i].remoteCleanup = CLEANUP_UNMAP;
}
}
}
return ncclSuccess;
}
static void initDebug() {
const char* nccl_debug = getenv("NCCL_DEBUG");
if (nccl_debug == NULL) {
ncclDebugLevel = NONE;
} else if (strcmp(nccl_debug, "VERSION") == 0) {
ncclDebugLevel = VERSION;
} else if (strcmp(nccl_debug, "WARN") == 0) {
ncclDebugLevel = WARN;
} else if (strcmp(nccl_debug, "INFO") == 0) {
ncclDebugLevel = INFO;
INFO("NCCL debug level set to INFO");
} else if (strcmp(nccl_debug, "ABORT") == 0) {
ncclDebugLevel = ABORT;
INFO("NCCL debug level set to ABORT");
}
}
static void commFree(ncclComm_t comm) {
if (comm == NULL)
return;
for(int i=0; i<MAXQUEUE; ++i) {
if (comm->events.isDone[i] != NULL)
if (hipEventDestroy(comm->events.isDone[i]) != hipSuccess)
INFO("failed to destroy cuda event %d", i);
}
ncclResult_t res = commClearMaps(comm);
if (res != ncclSuccess)
INFO("failed to cleanup comm maps");
if (comm->userFromRing != NULL)
free(comm->userFromRing);
if (comm->devUserFromRing != NULL)
if (hipFree(comm->devUserFromRing) != hipSuccess)
INFO("commFree failed to free dev maps");
if (comm->ringFromUser != NULL)
free(comm->ringFromUser);
if (comm->devMem != NULL && hipFree(comm->devMem) != hipSuccess)
INFO("Failed to free devMap");
if (comm->hostMem != NULL) {
if (comm->hostMemState & ShmMapped) {
if (hipHostUnregister(comm->hostMem) != hipSuccess)
INFO("Failed to unregister hostMem");
size_t size = offsetof(ncclMem, buff) + comm->buffSize;
if (shmUnmap(comm->hostMem, size) != ncclSuccess)
INFO("Failed to unmap hostMem");
comm->hostMemState ^= ShmMapped;
} else {
hipHostFree(comm->hostMem);
}
}
free(comm);
}
static ncclResult_t commAlloc(ncclComm_t* comret, int ndev, const ncclUniqueId* commId, int rank) {
if (ndev < 1) {
WARN("invalid device count (%d) requested", ndev);
return ncclUnsupportedDeviceCount;
}
if (rank >= ndev || rank < 0) {
WARN("rank %d exceeds ndev=%d", rank, ndev);
return ncclInvalidRank;
}
size_t commBytes = offsetof(ncclComm, ptrs) + ndev*sizeof(ncclNodeRef);
struct ncclComm* comm = (struct ncclComm*)malloc(commBytes);
if (comm == NULL) {
WARN("comm allocation failed");
return ncclSystemError;
}
memset(comm, 0, commBytes);
comm->nDev = ndev;
hipGetDevice(&comm->cudaDev);
const char* str = getenv("NCCL_BUFFSIZE");
if (str != NULL) {
errno = 0;
comm->buffSize = strtol(str, NULL, 10);
if (errno == ERANGE || comm->buffSize == 0) {
INFO("rank %d invalid NCCL_BUFFSIZE: %s, using default %lu",
rank, str, DEFAULT_BUFFER_SIZE_BYTES);
comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES;
}
} else {
comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES;
}
INFO("rank %d using buffSize = %lu", rank, comm->buffSize);
ncclResult_t res;
res = allocDevMem(&comm->devMem, comm->buffSize);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate device buffer", rank);
commFree(comm);
return res;
}
if (hipMalloc(&comm->devUserFromRing, ndev*sizeof(int)) != hipSuccess) {
WARN("rank %d failed to allocated device maps", rank);
commFree(comm);
return ncclCudaMallocFailed;
}
comm->userFromRing = (int*)malloc(ndev*sizeof(int));
if (comm->userFromRing == NULL) {
WARN("rank %d failed to allocate host maps", rank);
commFree(comm);
return ncclSystemError;
}
comm->ringFromUser = (int*)malloc(ndev*sizeof(int));
if (comm->ringFromUser == NULL) {
WARN("rank %d failed to allocate host maps", rank);
commFree(comm);
return ncclSystemError;
}
EventQueue* eq = &comm->events;
for(int i=0; i<MAXQUEUE; ++i) {
if (hipEventCreateWithFlags(eq->isDone+i, hipEventDisableTiming) != hipSuccess) {
WARN("rank %d failed to create nccl event %d", rank, i);
commFree(comm);
return ncclUnhandledCudaError;
}
}
if(commId == NULL) {
comm->hostMemState = 0;
res = allocHostMem(&comm->hostMem, comm->buffSize);
} else {
char rankname[1024];
sprintf(rankname, "%s-%d", commId->internal, rank);
res = openHostMemShm(rankname, &comm->hostMem, comm->buffSize);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate host buffer", rank);
commFree(comm);
return res;
}
comm->hostMemState = ShmMapped | ShmLinked;
}
*comret = comm;
return ncclSuccess;
}
static ncclResult_t commUnlinkHostMem(ncclComm_t comm, ncclUniqueId commId, int rank) {
char rankname[1024];
sprintf(rankname, "%s-%d", commId.internal, rank);
if (comm->hostMemState & ShmLinked)
comm->hostMemState ^= ShmLinked;
return shmUnlink(rankname);
}
static void showVersion() {
static int shown = 0;
if (shown == 0 && ncclDebugLevel >= VERSION) {
printf("NCCL version %d.%d.%d compiled with CUDA %d.%d\n", NCCL_MAJOR, NCCL_MINOR, NCCL_PATCH, CUDA_MAJOR, CUDA_MINOR);
    fflush(stdout);
shown = 1;
}
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommInitRank(ncclComm_t* newcomm, int ndev, ncclUniqueId commId, int myrank) {
if (myrank == 0) showVersion();
if (strlen(commId.internal) < 1 ||
strlen(commId.internal) >= NCCL_UNIQUE_ID_BYTES) {
WARN("rank %d invalid commId", myrank);
return ncclInvalidArgument;
}
initDebug();
ncclResult_t res;
RankEntry myStuff;
RankGather* gath = NULL;
res = wrapSymbols();
if (res != ncclSuccess) {
WARN("NCCL failed to initialize client libs");
return res;
}
res = wrapNvmlInit();
if (res != ncclSuccess) {
WARN("rank %d failed to initialize nvml", myrank);
return res;
}
res = commAlloc(newcomm, ndev, &commId, myrank);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate communicator", myrank);
return res;
}
res = populateRankInfo(&myStuff, myrank, *newcomm);
if (res != ncclSuccess) {
WARN("rank %d failed to obtain rank info", myrank);
goto cleanup;
}
res = initGather(&gath, commId, ndev, myrank, myStuff);
if (res != ncclSuccess) {
WARN("rank %d failed to gather rank info", myrank);
goto cleanup;
}
res = commBuildMaps(*newcomm, &commId, myrank, gath->ranks, &gath->ringDirectFail);
if (res != ncclSuccess) {
WARN("rank %d failed to build comm maps", myrank);
goto cleanup;
}
syncRingDirect(gath, &((*newcomm)->useRemoteRecv));
INFO("PushToRecv algos are %s\n", (*newcomm)->useRemoteRecv ? "enabled" : "disabled");
res = closeGather(gath, ndev); // includes a barrier
gath = NULL;
if (res != ncclSuccess) {
WARN("rank %d failed to close gather", myrank);
goto cleanup;
}
goto final;
cleanup:
if (gath != NULL)
closeGather(gath, ndev);
commFree(*newcomm);
final:
if ((*newcomm)->hostMemState & ShmLinked) {
if (commUnlinkHostMem(*newcomm, commId, myrank) != ncclSuccess)
INFO("rank %d failed to unlink host mem shm segment", myrank);
}
if (wrapNvmlShutdown() != ncclSuccess)
INFO("rank %d did not shutdown nvml properly", myrank);
return res;
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommInitAll(ncclComm_t* comms, int ndev, const int* devlist) {
initDebug();
showVersion();
ncclResult_t res;
int savedDevice;
RankEntry* ranks = NULL;
int rank, cudaDev;
ncclComm_t comm = NULL;
char busId[13];
uint32_t nvmlHandle;
int affinity_set = 0;
int ringDirectFail = 0; // Assume direct access to recv ptr OK
res = wrapSymbols();
if (res != ncclSuccess) {
WARN("NCCL failed to initialize client libs");
return res;
}
hipGetDevice(&savedDevice);
ranks = (RankEntry*)malloc(ndev*sizeof(RankEntry));
if (ranks == NULL) {
WARN("NCCL allocation failed");
return ncclSystemError;
}
memset(ranks, 0, ndev*sizeof(RankEntry));
res = wrapNvmlInit();
if (res != ncclSuccess) {
WARN("nccl failed to initialize nvml");
return res;
}
for(rank=0; rank<ndev; ++rank)
comms[rank] = NULL;
for (rank=0; rank<ndev; ++rank) {
cudaDev = (devlist == NULL) ? rank : devlist[rank];
if (hipSetDevice(cudaDev) != hipSuccess) {
WARN("rank %d failed to set cuda device %d", rank, cudaDev);
res = ncclInvalidDeviceIndex;
goto cleanup;
}
// Set CPU affinity
affinity_set = 0;
if (hipDeviceGetPCIBusId(busId, 13, cudaDev) != hipSuccess) {
INFO("rank %d failed to get PCI Bus Id for device %d", rank, cudaDev);
goto skipaffinity;
}
if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) {
INFO("rank %d failed to get nvml handle for device %s", rank, busId);
goto skipaffinity;
}
if (wrapNvmlDeviceSetCpuAffinity(nvmlHandle) != ncclSuccess) {
INFO("rank %d failed to set affinity", rank);
goto skipaffinity;
}
affinity_set = 1;
skipaffinity:
res = commAlloc(&comm, ndev, NULL, rank);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate communicator", rank);
goto cleanup;
}
comms[rank] = comm;
if (affinity_set && wrapNvmlDeviceClearCpuAffinity(nvmlHandle) != ncclSuccess) {
INFO("rank %d set but failed to clear cpu affinity", rank);
}
res = populateRankInfo(ranks+rank, rank, comm);
if (res != ncclSuccess) {
WARN("rank %d failed to obtain rank info", rank);
goto cleanup;
}
}
orderRanks(ranks, ndev);
for(rank=0; rank<ndev; ++rank) {
comm = comms[rank];
hipSetDevice(comm->cudaDev);
res = commBuildMaps(comm, NULL, rank, ranks, &ringDirectFail);
if (res != ncclSuccess) {
WARN("rank %d failed to build comm maps", rank);
goto cleanup;
}
}
INFO("PushToRecv algos are %s\n", (ringDirectFail) ? "disabled" : "enabled");
for(rank=0; rank<ndev; ++rank) {
comms[rank]->useRemoteRecv = ringDirectFail ? 0 : 1;
}
free(ranks);
ranks = NULL;
res = ncclSuccess;
goto final;
cleanup:
if (ranks != NULL)
free(ranks);
for(rank=0; rank<ndev; ++rank) {
if(comms[rank] != NULL) {
commFree(comms[rank]);
}
}
final:
if(wrapNvmlShutdown() != ncclSuccess)
INFO("NCCL did not shutdown nvml properly");
hipSetDevice(savedDevice);
return res;
}
extern "C" DSOGLOBAL
void ncclCommDestroy(ncclComm_t comm) {
if (comm == NULL)
return;
int savedDevice;
hipGetDevice(&savedDevice);
int commDevice = comm->cudaDev;
if (savedDevice != commDevice) {
CUDACHECK(hipSetDevice(commDevice));
}
commFree(comm);
if (savedDevice != commDevice)
hipSetDevice(savedDevice);
}
extern "C" DSOGLOBAL
const char* ncclGetErrorString(ncclResult_t code) {
switch (code) {
case ncclSuccess : return "no error";
case ncclUnhandledCudaError : return "unhandled cuda error";
case ncclSystemError : return "system error";
case ncclInternalError : return "internal error";
case ncclInvalidDevicePointer : return "invalid device pointer";
case ncclInvalidRank : return "invalid rank";
case ncclUnsupportedDeviceCount : return "unsupported device count";
case ncclDeviceNotFound : return "device not found";
case ncclInvalidDeviceIndex : return "invalid device index";
case ncclLibWrapperNotSet : return "lib wrapper not initialized";
case ncclCudaMallocFailed : return "cuda malloc failed";
case ncclRankMismatch : return "parameter mismatch between ranks";
case ncclInvalidArgument : return "invalid argument";
case ncclInvalidType : return "invalid data type";
case ncclInvalidOperation : return "invalid reduction operations";
}
return "unknown result code";
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommCount(const ncclComm_t comm, int* count) {
*count = comm->nDev;
return ncclSuccess;
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* devid) {
*devid = comm->cudaDev;
return ncclSuccess;
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank) {
*rank = comm->userFromRing[comm->ncclId];
return ncclSuccess;
}
|
9e3e99e819979c03c22c5afb7d2a33633038863f.cu
|
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "core.h"
#include "libwrap.h"
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <string.h>
#include <errno.h>
DebugLevel ncclDebugLevel;
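// The unique id is the string "nccl-<pid>-<counter>"; it later names the shm
// segments used for the rank gather and the per-rank host buffers.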
extern "C" DSOGLOBAL
ncclResult_t ncclGetUniqueId(ncclUniqueId* out) {
pid_t pid = getpid();
static int count = 0;
int commId = __sync_fetch_and_add(&count, 1);
int len = snprintf(out->internal, NCCL_UNIQUE_ID_BYTES, "nccl-%d-%d", pid, commId);
if(strlen(out->internal) < len) {
WARN("ncclUniqueId truncated");
return ncclInternalError;
}
return ncclSuccess;
}
static ncclResult_t shmOpen(const char* shmname, size_t bytes, void** ptr) {
int fd = shm_open(shmname, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
if (fd == -1) {
WARN("shm_open failed to open %s", shmname);
return ncclSystemError;
}
if (ftruncate(fd, bytes) == -1) {
WARN("ftruncate failed to allocate %ld bytes", bytes);
shm_unlink(shmname);
close(fd);
return ncclSystemError;
}
*ptr = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (*ptr == MAP_FAILED) {
WARN("failure in mmap");
shm_unlink(shmname);
close(fd);
return ncclSystemError;
}
close(fd);
return ncclSuccess;
}
static ncclResult_t shmUnlink(const char* shmname) {
if(shm_unlink(shmname) == -1) {
WARN("smh_unlink failed");
return ncclSystemError;
} else {
return ncclSuccess;
}
}
static ncclResult_t shmUnmap(void* ptr, size_t bytes) {
if(munmap(ptr, bytes) == -1) {
WARN("munmap failed");
return ncclSystemError;
} else {
return ncclSuccess;
}
}
typedef struct {
int rank;
int ndev;
int cudaDev;
int ncclId;
pid_t pid;
ncclMem* hostptr;
ncclMem* devptr;
cudaIpcMemHandle_t devipc;
size_t buffSize;
} RankEntry;
static int compRanks(const void* a, const void* b) {
const RankEntry* A = (const RankEntry*)a;
const RankEntry* B = (const RankEntry*)b;
if (A->ncclId < B->ncclId) return -1;
if (A->ncclId > B->ncclId) return 1;
return 0;
}
static void orderRanks(RankEntry* ranks, int count) {
qsort(ranks, count, sizeof(RankEntry), compRanks);
for(int i=0; i<count; ++i)
ranks[i].ncclId = i;
}
typedef struct {
union {
struct {
volatile int bar;
int ringDirectFail;
};
char pad[16];
};
RankEntry ranks[1];
} RankGather;
static ncclResult_t initGather(RankGather** gather, ncclUniqueId commId,
int ndev, int rank, RankEntry myInfo) {
size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry);
RankGather* tmp = NULL;
int bar_tmp;
ncclResult_t res = shmOpen(commId.internal, bytes, (void**)&tmp);
if (res != ncclSuccess) {
WARN("rank %d failed to open shm segment for gather", rank);
return res;
}
tmp->ranks[rank] = myInfo;
bar_tmp = tmp->bar - 1;
bool swapped;
do {
bar_tmp += 1;
if (bar_tmp == ndev-1) { // everyone is done
ncclResult_t res = shmUnlink(commId.internal);
if (res != ncclSuccess) {
WARN("rank %d failed to unlink shm segment for gather", rank);
shmUnmap(tmp, bytes);
return res;
}
orderRanks(tmp->ranks, ndev);
}
swapped = __sync_bool_compare_and_swap(&tmp->bar, bar_tmp, bar_tmp+1);
} while(!swapped);
while (tmp->bar < ndev)
sched_yield();
__sync_synchronize();
*gather = tmp;
return ncclSuccess;
}
static void syncRingDirect(RankGather* gather, int* ringDirectOk) {
int bar_tmp = gather->bar - 1;
int ndev = gather->ranks[0].ndev;
bool swapped;
do {
bar_tmp += 1;
swapped = __sync_bool_compare_and_swap(&gather->bar, bar_tmp, bar_tmp+1);
} while(!swapped);
while (gather->bar < 2*ndev) // Wait for all ranks to arrive at this second barrier
sched_yield();
__sync_synchronize();
*ringDirectOk = gather->ringDirectFail ? 0 : 1;
}
static ncclResult_t closeGather(RankGather* gather, int ndev) {
int bar_tmp = gather->bar - 1;
bool swapped;
do {
bar_tmp += 1;
swapped = __sync_bool_compare_and_swap(&gather->bar, bar_tmp, bar_tmp+1);
} while(!swapped);
while (gather->bar < 3*ndev) // Wait for all ranks to arrive at this third barrier
sched_yield();
__sync_synchronize();
size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry);
ncclResult_t res = shmUnmap(gather, bytes);
if (res != ncclSuccess) {
WARN("failed to unmap %ld bytes of gather", bytes);
return res;
}
return ncclSuccess;
}
static ncclResult_t allocDevMem(ncclMem** ptr, size_t buffSize) {
size_t size = offsetof(struct ncclMem, buff) + buffSize;
cudaError_t res = cudaMalloc((void**)ptr, size);
if (res != cudaSuccess) {
*ptr = NULL;
WARN("failed to allocate %lu byte device buffer", size);
return ncclCudaMallocFailed;
}
if (cudaMemset(*ptr, 0, size) != cudaSuccess) {
WARN("failed to memset device buffer.");
cudaFree(*ptr);
*ptr = NULL;
return ncclUnhandledCudaError;
}
return ncclSuccess;
}
static const int ShmMapped = 1;
static const int ShmLinked = 2;
static ncclResult_t allocHostMem(ncclMem** ptr, size_t buffSize) {
size_t size = offsetof(struct ncclMem, buff) + buffSize;
cudaError_t res = cudaMallocHost((void**)ptr, size);
if (res != cudaSuccess) {
*ptr = NULL;
WARN("failed to allocate %lu byte host buffer", size);
return ncclSystemError;
}
memset(*ptr, 0, size);
return ncclSuccess;
}
static ncclResult_t openHostMemShm(const char* shmname, ncclMem** ptr, size_t buffSize) {
size_t size = offsetof(struct ncclMem, buff) + buffSize;
ncclResult_t res = shmOpen(shmname, size, (void**)ptr);
if (res != ncclSuccess) {
WARN("failed to allocate %lu byte shm buffer", size);
*ptr = NULL;
return res;
}
if(cudaHostRegister(*ptr, size, cudaHostRegisterMapped) != cudaSuccess) {
WARN("failed to register host buffer");
shmUnlink(shmname);
shmUnmap(*ptr, size);
*ptr = NULL;
return ncclUnhandledCudaError;
}
return ncclSuccess;
}
static ncclResult_t populateRankInfo(RankEntry* info, int rank, ncclComm_t comm) {
char busId[13];
nvmlDevice_t nvmlHandle;
cudaError_t res = cudaDeviceGetPCIBusId(busId, 13, comm->cudaDev);
if (res == cudaErrorInvalidDevice) {
WARN("rank %d attempted to access an invalid cuda device %d", rank, comm->cudaDev);
return ncclInvalidDeviceIndex;
} else if (res != cudaSuccess) {
WARN("rank %d failed to get PCI Bus Id for device %d", rank, comm->cudaDev);
return ncclUnhandledCudaError;
}
INFO("rank %d using device %d (%s)", rank, comm->cudaDev, busId);
if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) {
WARN("rank %d failed to get nvml handle for device %s", rank, busId);
return ncclUnhandledCudaError;
}
// Order by nvml index
if (wrapNvmlDeviceGetIndex(nvmlHandle, (unsigned*)&info->ncclId) != ncclSuccess) {
WARN("rank %d failed to get nvml device index for device %d", rank, comm->cudaDev);
return ncclUnhandledCudaError;
}
info->rank = rank;
info->ndev = comm->nDev;
info->cudaDev = comm->cudaDev;
info->pid = getpid();
info->buffSize = comm->buffSize;
info->hostptr = comm->hostMem;
info->devptr = comm->devMem;
if (cudaIpcGetMemHandle(&info->devipc, (void*)comm->devMem) != cudaSuccess) {
WARN("rank %d failed to open CUDA IPC handle", rank);
return ncclUnhandledCudaError;
}
return ncclSuccess;
}
static const int CLEANUP_NONE = 0;
static const int CLEANUP_CUIPC = 1;
static const int CLEANUP_UNMAP = 2;
static ncclResult_t commClearMaps(ncclComm_t comm) {
ncclResult_t res, retval = ncclSuccess;
cudaError_t cures;
for(int d=0; d<comm->nDev; ++d) {
switch(comm->ptrs[d].remoteCleanup) {
case CLEANUP_NONE:
break;
case CLEANUP_CUIPC:
cures = cudaIpcCloseMemHandle((void*)comm->ptrs[d].cleanupHandle);
if (cures != cudaSuccess) {
WARN("rank %d failed to close IPC handle to rank %d",
comm->userFromRing[comm->ncclId], comm->userFromRing[d]);
retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval;
}
break;
case CLEANUP_UNMAP:
cures = cudaHostUnregister(comm->ptrs[d].cleanupHandle);
if (cures != cudaSuccess) {
WARN("rank %d failed to unregister handle to rank %d",
comm->userFromRing[comm->ncclId], comm->userFromRing[d]);
retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval;
}
res = shmUnmap(comm->ptrs[d].cleanupHandle, offsetof(ncclMem, buff) + comm->buffSize);
if (res != ncclSuccess) {
WARN("rank %d failed to unmap handle to rank %d",
comm->userFromRing[comm->ncclId], comm->userFromRing[d]);
retval = (retval == ncclSuccess) ? res : retval;
}
break;
default:
WARN("Unknown cleanup type %d", comm->ptrs[d].remoteCleanup);
}
comm->ptrs[d].remoteCleanup = CLEANUP_NONE;
comm->ptrs[d].cleanupHandle = NULL;
}
if (comm->userFromRing != NULL)
memset(comm->userFromRing, 0, sizeof(int)*comm->nDev);
if (comm->ringFromUser != NULL)
memset(comm->ringFromUser, 0, sizeof(int)*comm->nDev);
if (comm->devUserFromRing != NULL) {
cudaError_t err = cudaMemset(comm->devUserFromRing, 0, sizeof(int)*comm->nDev);
if (err != cudaSuccess) {
WARN("Faild to clear dev map: %s", cudaGetErrorString(err));
retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval;
}
}
return retval;
}
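// commBuildMaps fills the ring<->user rank maps and, for each peer, selects an
// access path (same device, P2P, CUDA IPC, or zero-copy host memory) and
// records the local/remote pointers plus the cleanup needed for them.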
static ncclResult_t commBuildMaps(ncclComm_t comm, ncclUniqueId* commId, int rank, RankEntry* ranks, int* ringDirectFailed) {
int ndev = comm->nDev;
for(int i=0; i<ndev; ++i) {
// Check for inconsistencies between ranks
// If two ranks use the same rank, then one slot of
// ranks[] will be left unset with zero ndev/buffSize.
if (ranks[i].buffSize != comm->buffSize
|| ranks[i].ndev != comm->nDev) {
commClearMaps(comm);
return ncclRankMismatch;
}
// Create rank<->nccl maps
int iRank = ranks[i].rank;
comm->userFromRing[i] = iRank;
comm->ringFromUser[iRank] = i;
}
if (cudaMemcpy(comm->devUserFromRing, comm->userFromRing, ndev*sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
WARN("rank %d failed to copy maps to device", rank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
int myId = -1;
for (int i=0; i<ndev; ++i) {
if(ranks[i].rank == rank) {
myId = i;
break;
}
}
if (myId == -1) {
WARN("rank %d not found in communicator", rank);
return ncclInvalidRank;
}
comm->ncclId = myId;
int myDev = ranks[myId].cudaDev;
pid_t myPid = ranks[myId].pid;
comm->useRemoteRecv = 1; // Assume we directly write to result ptrs.
// The order that we link with peers must ensure that
// P2P slots are used for high-priority links first.
for (int j=0; j<ndev; ++j) {
int i = (myId - 1 + ndev + j) % ndev;
int iRank = ranks[i].rank;
int iDev = ranks[i].cudaDev;
pid_t iPid = ranks[i].pid;
int canpeer = 0;
if (cudaDeviceCanAccessPeer(&canpeer, myDev, iDev) != cudaSuccess) {
INFO("peer query failed between rank %d (dev %d) and rank %d (dev %d)",
rank, myDev, iRank, iDev);
canpeer = 0;
}
if (iPid == myPid) {
if (myDev == iDev) {
INFO("rank access %d -> %d via common device", rank, iRank);
comm->ptrs[i].local = ranks[myId].devptr;
comm->ptrs[i].remote = ranks[i].devptr;
comm->ptrs[i].remoteCleanup = CLEANUP_NONE;
} else {
int peer_enabled = canpeer;
if (canpeer) {
cudaError_t p2pErr = cudaDeviceEnablePeerAccess(iDev, 0);
if (p2pErr == cudaErrorPeerAccessAlreadyEnabled) {
cudaGetLastError();
} else if (p2pErr != cudaSuccess) {
INFO("peer access failed between rank %d (dev %d) and rank %d (dev %d)\n",
rank, myDev, iRank, iDev);
peer_enabled = 0;
}
}
if (peer_enabled) {
INFO("rank access %d -> %d via P2P device mem", rank, iRank);
comm->ptrs[i].local = ranks[myId].devptr;
comm->ptrs[i].remote = ranks[i].devptr;
comm->ptrs[i].remoteCleanup = CLEANUP_NONE;
} else { // go through hostmem
INFO("rank access %d -> %d via zero-copy host mem", rank, iRank);
if (j <= 2)
*ringDirectFailed = 1;
if (cudaHostGetDevicePointer(&comm->ptrs[i].local, ranks[myId].hostptr, 0) != cudaSuccess) {
WARN("rank %d failed to map zero copy buffer to device", rank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
if (cudaHostGetDevicePointer(&comm->ptrs[i].remote, ranks[i].hostptr, 0) != cudaSuccess) {
WARN("rank %d failed to map %d's zero copy buffer to device", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
comm->ptrs[i].remoteCleanup = CLEANUP_NONE;
}
}
} else { // multi-process!
*ringDirectFailed = 1;
if (canpeer || myDev == iDev) {
INFO("rank access %d -> %d via Ipc P2P device mem", rank, iRank);
comm->ptrs[i].local = ranks[myId].devptr;
if (cudaIpcOpenMemHandle((void**)(&comm->ptrs[i].remote),
ranks[i].devipc, cudaIpcMemLazyEnablePeerAccess) != cudaSuccess) {
WARN("rank %d failed to open Ipc handle to rank %d", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
comm->ptrs[i].remoteCleanup = CLEANUP_CUIPC;
comm->ptrs[i].cleanupHandle = comm->ptrs[i].remote;
} else { // go through hostmem
INFO("rank access %d -> %d via zero copy host shm", rank, iRank);
if (cudaHostGetDevicePointer(&comm->ptrs[i].local, ranks[myId].hostptr, 0) != cudaSuccess) {
WARN("rank %d failed to obtain dev ptr to sysmem buffer", rank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
char rankname[1024];
sprintf(rankname, "%s-%d", commId->internal, ranks[i].rank);
if (openHostMemShm(rankname, (ncclMem**)&comm->ptrs[i].cleanupHandle, ranks[i].buffSize)
!= ncclSuccess) {
WARN("rank %d failed to open sysmem buffer of rank %d", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
if (cudaHostGetDevicePointer(&comm->ptrs[i].remote, comm->ptrs[i].cleanupHandle, 0) != cudaSuccess) {
WARN("rank %d failed to obtain dev ptr for rank %d", rank, iRank);
commClearMaps(comm);
return ncclUnhandledCudaError;
}
comm->ptrs[i].remoteCleanup = CLEANUP_UNMAP;
}
}
}
return ncclSuccess;
}
static void initDebug() {
const char* nccl_debug = getenv("NCCL_DEBUG");
if (nccl_debug == NULL) {
ncclDebugLevel = NONE;
} else if (strcmp(nccl_debug, "VERSION") == 0) {
ncclDebugLevel = VERSION;
} else if (strcmp(nccl_debug, "WARN") == 0) {
ncclDebugLevel = WARN;
} else if (strcmp(nccl_debug, "INFO") == 0) {
ncclDebugLevel = INFO;
INFO("NCCL debug level set to INFO");
} else if (strcmp(nccl_debug, "ABORT") == 0) {
ncclDebugLevel = ABORT;
INFO("NCCL debug level set to ABORT");
}
}
static void commFree(ncclComm_t comm) {
if (comm == NULL)
return;
for(int i=0; i<MAXQUEUE; ++i) {
if (comm->events.isDone[i] != NULL)
if (cudaEventDestroy(comm->events.isDone[i]) != cudaSuccess)
INFO("failed to destroy cuda event %d", i);
}
ncclResult_t res = commClearMaps(comm);
if (res != ncclSuccess)
INFO("failed to cleanup comm maps");
if (comm->userFromRing != NULL)
free(comm->userFromRing);
if (comm->devUserFromRing != NULL)
if (cudaFree(comm->devUserFromRing) != cudaSuccess)
INFO("commFree failed to free dev maps");
if (comm->ringFromUser != NULL)
free(comm->ringFromUser);
if (comm->devMem != NULL && cudaFree(comm->devMem) != cudaSuccess)
INFO("Failed to free devMap");
if (comm->hostMem != NULL) {
if (comm->hostMemState & ShmMapped) {
if (cudaHostUnregister(comm->hostMem) != cudaSuccess)
INFO("Failed to unregister hostMem");
size_t size = offsetof(ncclMem, buff) + comm->buffSize;
if (shmUnmap(comm->hostMem, size) != ncclSuccess)
INFO("Failed to unmap hostMem");
comm->hostMemState ^= ShmMapped;
} else {
cudaFreeHost(comm->hostMem);
}
}
free(comm);
}
static ncclResult_t commAlloc(ncclComm_t* comret, int ndev, const ncclUniqueId* commId, int rank) {
if (ndev < 1) {
WARN("invalid device count (%d) requested", ndev);
return ncclUnsupportedDeviceCount;
}
if (rank >= ndev || rank < 0) {
WARN("rank %d exceeds ndev=%d", rank, ndev);
return ncclInvalidRank;
}
size_t commBytes = offsetof(ncclComm, ptrs) + ndev*sizeof(ncclNodeRef);
struct ncclComm* comm = (struct ncclComm*)malloc(commBytes);
if (comm == NULL) {
WARN("comm allocation failed");
return ncclSystemError;
}
memset(comm, 0, commBytes);
comm->nDev = ndev;
cudaGetDevice(&comm->cudaDev);
const char* str = getenv("NCCL_BUFFSIZE");
if (str != NULL) {
errno = 0;
comm->buffSize = strtol(str, NULL, 10);
if (errno == ERANGE || comm->buffSize == 0) {
INFO("rank %d invalid NCCL_BUFFSIZE: %s, using default %lu",
rank, str, DEFAULT_BUFFER_SIZE_BYTES);
comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES;
}
} else {
comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES;
}
INFO("rank %d using buffSize = %lu", rank, comm->buffSize);
ncclResult_t res;
res = allocDevMem(&comm->devMem, comm->buffSize);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate device buffer", rank);
commFree(comm);
return res;
}
if (cudaMalloc(&comm->devUserFromRing, ndev*sizeof(int)) != cudaSuccess) {
WARN("rank %d failed to allocated device maps", rank);
commFree(comm);
return ncclCudaMallocFailed;
}
comm->userFromRing = (int*)malloc(ndev*sizeof(int));
if (comm->userFromRing == NULL) {
WARN("rank %d failed to allocate host maps", rank);
commFree(comm);
return ncclSystemError;
}
comm->ringFromUser = (int*)malloc(ndev*sizeof(int));
if (comm->ringFromUser == NULL) {
WARN("rank %d failed to allocate host maps", rank);
commFree(comm);
return ncclSystemError;
}
EventQueue* eq = &comm->events;
for(int i=0; i<MAXQUEUE; ++i) {
if (cudaEventCreateWithFlags(eq->isDone+i, cudaEventDisableTiming) != cudaSuccess) {
WARN("rank %d failed to create nccl event %d", rank, i);
commFree(comm);
return ncclUnhandledCudaError;
}
}
if(commId == NULL) {
comm->hostMemState = 0;
res = allocHostMem(&comm->hostMem, comm->buffSize);
} else {
char rankname[1024];
sprintf(rankname, "%s-%d", commId->internal, rank);
res = openHostMemShm(rankname, &comm->hostMem, comm->buffSize);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate host buffer", rank);
commFree(comm);
return res;
}
comm->hostMemState = ShmMapped | ShmLinked;
}
*comret = comm;
return ncclSuccess;
}
static ncclResult_t commUnlinkHostMem(ncclComm_t comm, ncclUniqueId commId, int rank) {
char rankname[1024];
sprintf(rankname, "%s-%d", commId.internal, rank);
if (comm->hostMemState & ShmLinked)
comm->hostMemState ^= ShmLinked;
return shmUnlink(rankname);
}
static void showVersion() {
static int shown = 0;
if (shown == 0 && ncclDebugLevel >= VERSION) {
printf("NCCL version %d.%d.%d compiled with CUDA %d.%d\n", NCCL_MAJOR, NCCL_MINOR, NCCL_PATCH, CUDA_MAJOR, CUDA_MINOR);
    fflush(stdout);
shown = 1;
}
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommInitRank(ncclComm_t* newcomm, int ndev, ncclUniqueId commId, int myrank) {
if (myrank == 0) showVersion();
if (strlen(commId.internal) < 1 ||
strlen(commId.internal) >= NCCL_UNIQUE_ID_BYTES) {
WARN("rank %d invalid commId", myrank);
return ncclInvalidArgument;
}
initDebug();
ncclResult_t res;
RankEntry myStuff;
RankGather* gath = NULL;
res = wrapSymbols();
if (res != ncclSuccess) {
WARN("NCCL failed to initialize client libs");
return res;
}
res = wrapNvmlInit();
if (res != ncclSuccess) {
WARN("rank %d failed to initialize nvml", myrank);
return res;
}
res = commAlloc(newcomm, ndev, &commId, myrank);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate communicator", myrank);
return res;
}
res = populateRankInfo(&myStuff, myrank, *newcomm);
if (res != ncclSuccess) {
WARN("rank %d failed to obtain rank info", myrank);
goto cleanup;
}
res = initGather(&gath, commId, ndev, myrank, myStuff);
if (res != ncclSuccess) {
WARN("rank %d failed to gather rank info", myrank);
goto cleanup;
}
res = commBuildMaps(*newcomm, &commId, myrank, gath->ranks, &gath->ringDirectFail);
if (res != ncclSuccess) {
WARN("rank %d failed to build comm maps", myrank);
goto cleanup;
}
syncRingDirect(gath, &((*newcomm)->useRemoteRecv));
INFO("PushToRecv algos are %s\n", (*newcomm)->useRemoteRecv ? "enabled" : "disabled");
res = closeGather(gath, ndev); // includes a barrier
gath = NULL;
if (res != ncclSuccess) {
WARN("rank %d failed to close gather", myrank);
goto cleanup;
}
goto final;
cleanup:
if (gath != NULL)
closeGather(gath, ndev);
commFree(*newcomm);
final:
if ((*newcomm)->hostMemState & ShmLinked) {
if (commUnlinkHostMem(*newcomm, commId, myrank) != ncclSuccess)
INFO("rank %d failed to unlink host mem shm segment", myrank);
}
if (wrapNvmlShutdown() != ncclSuccess)
INFO("rank %d did not shutdown nvml properly", myrank);
return res;
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommInitAll(ncclComm_t* comms, int ndev, const int* devlist) {
initDebug();
showVersion();
ncclResult_t res;
int savedDevice;
RankEntry* ranks = NULL;
int rank, cudaDev;
ncclComm_t comm = NULL;
char busId[13];
nvmlDevice_t nvmlHandle;
int affinity_set = 0;
int ringDirectFail = 0; // Assume direct access to recv ptr OK
res = wrapSymbols();
if (res != ncclSuccess) {
WARN("NCCL failed to initialize client libs");
return res;
}
cudaGetDevice(&savedDevice);
ranks = (RankEntry*)malloc(ndev*sizeof(RankEntry));
if (ranks == NULL) {
WARN("NCCL allocation failed");
return ncclSystemError;
}
memset(ranks, 0, ndev*sizeof(RankEntry));
res = wrapNvmlInit();
if (res != ncclSuccess) {
WARN("nccl failed to initialize nvml");
return res;
}
for(rank=0; rank<ndev; ++rank)
comms[rank] = NULL;
for (rank=0; rank<ndev; ++rank) {
cudaDev = (devlist == NULL) ? rank : devlist[rank];
if (cudaSetDevice(cudaDev) != cudaSuccess) {
WARN("rank %d failed to set cuda device %d", rank, cudaDev);
res = ncclInvalidDeviceIndex;
goto cleanup;
}
// Set CPU affinity
affinity_set = 0;
if (cudaDeviceGetPCIBusId(busId, 13, cudaDev) != cudaSuccess) {
INFO("rank %d failed to get PCI Bus Id for device %d", rank, cudaDev);
goto skipaffinity;
}
if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) {
INFO("rank %d failed to get nvml handle for device %s", rank, busId);
goto skipaffinity;
}
if (wrapNvmlDeviceSetCpuAffinity(nvmlHandle) != ncclSuccess) {
INFO("rank %d failed to set affinity", rank);
goto skipaffinity;
}
affinity_set = 1;
skipaffinity:
res = commAlloc(&comm, ndev, NULL, rank);
if (res != ncclSuccess) {
WARN("rank %d failed to allocate communicator", rank);
goto cleanup;
}
comms[rank] = comm;
if (affinity_set && wrapNvmlDeviceClearCpuAffinity(nvmlHandle) != ncclSuccess) {
INFO("rank %d set but failed to clear cpu affinity", rank);
}
res = populateRankInfo(ranks+rank, rank, comm);
if (res != ncclSuccess) {
WARN("rank %d failed to obtain rank info", rank);
goto cleanup;
}
}
orderRanks(ranks, ndev);
for(rank=0; rank<ndev; ++rank) {
comm = comms[rank];
cudaSetDevice(comm->cudaDev);
res = commBuildMaps(comm, NULL, rank, ranks, &ringDirectFail);
if (res != ncclSuccess) {
WARN("rank %d failed to build comm maps", rank);
goto cleanup;
}
}
INFO("PushToRecv algos are %s\n", (ringDirectFail) ? "disabled" : "enabled");
for(rank=0; rank<ndev; ++rank) {
comms[rank]->useRemoteRecv = ringDirectFail ? 0 : 1;
}
free(ranks);
ranks = NULL;
res = ncclSuccess;
goto final;
cleanup:
if (ranks != NULL)
free(ranks);
for(rank=0; rank<ndev; ++rank) {
if(comms[rank] != NULL) {
commFree(comms[rank]);
}
}
final:
if(wrapNvmlShutdown() != ncclSuccess)
INFO("NCCL did not shutdown nvml properly");
cudaSetDevice(savedDevice);
return res;
}
extern "C" DSOGLOBAL
void ncclCommDestroy(ncclComm_t comm) {
if (comm == NULL)
return;
int savedDevice;
cudaGetDevice(&savedDevice);
int commDevice = comm->cudaDev;
if (savedDevice != commDevice) {
CUDACHECK(cudaSetDevice(commDevice));
}
commFree(comm);
if (savedDevice != commDevice)
cudaSetDevice(savedDevice);
}
extern "C" DSOGLOBAL
const char* ncclGetErrorString(ncclResult_t code) {
switch (code) {
case ncclSuccess : return "no error";
case ncclUnhandledCudaError : return "unhandled cuda error";
case ncclSystemError : return "system error";
case ncclInternalError : return "internal error";
case ncclInvalidDevicePointer : return "invalid device pointer";
case ncclInvalidRank : return "invalid rank";
case ncclUnsupportedDeviceCount : return "unsupported device count";
case ncclDeviceNotFound : return "device not found";
case ncclInvalidDeviceIndex : return "invalid device index";
case ncclLibWrapperNotSet : return "lib wrapper not initialized";
case ncclCudaMallocFailed : return "cuda malloc failed";
case ncclRankMismatch : return "parameter mismatch between ranks";
case ncclInvalidArgument : return "invalid argument";
case ncclInvalidType : return "invalid data type";
case ncclInvalidOperation : return "invalid reduction operations";
}
return "unknown result code";
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommCount(const ncclComm_t comm, int* count) {
*count = comm->nDev;
return ncclSuccess;
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* devid) {
*devid = comm->cudaDev;
return ncclSuccess;
}
extern "C" DSOGLOBAL
ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank) {
*rank = comm->userFromRing[comm->ncclId];
return ncclSuccess;
}
|
190ecffbc3a4e6a20458112986571f4decc838fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlarfbx.cu normal z -> s, Fri Sep 11 18:29:21 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
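// One thread block per column of V: computes dwork[blockIdx.x] = V(:,blockIdx.x)**H * c,
// i.e. dwork = V**H c.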
extern "C"
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
const float * __restrict__ c,
float *dwork)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_sgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
SGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c,
float *dwork, float *tau)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
if (i == 0)
c[0] = MAGMA_S_ONE;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
const float * __restrict__ x, float *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
float lsum;
V += j;
lsum = MAGMA_S_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_S_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_slarfbx_gpu(
magma_int_t m, magma_int_t k,
magmaFloat_ptr V, magma_int_t ldv,
magmaFloat_ptr dT, magma_int_t ldt,
magmaFloat_ptr c,
magmaFloat_ptr dwork)
{
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_sgemv_kernel1), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m, V, ldv, c, dwork);
/* dwork = T**H dwork */
hipLaunchKernelGGL(( magma_strmv_tkernel), dim3(k), dim3(k), 0, magma_stream , dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_sgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
190ecffbc3a4e6a20458112986571f4decc838fb.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlarfbx.cu normal z -> s, Fri Sep 11 18:29:21 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
const float * __restrict__ c,
float *dwork)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_sgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
SGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c,
float *dwork, float *tau)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
if (i == 0)
c[0] = MAGMA_S_ONE;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
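// One thread per row of c: computes c = c - V x for the m-by-n matrix V.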
extern "C"
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
const float * __restrict__ x, float *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
float lsum;
V += j;
lsum = MAGMA_S_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_S_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_slarfbx_gpu(
magma_int_t m, magma_int_t k,
magmaFloat_ptr V, magma_int_t ldv,
magmaFloat_ptr dT, magma_int_t ldt,
magmaFloat_ptr c,
magmaFloat_ptr dwork)
{
/* dwork = V**H c */
magma_sgemv_kernel1<<< k, BLOCK_SIZE, 0, magma_stream >>>(m, V, ldv, c, dwork);
/* dwork = T**H dwork */
magma_strmv_tkernel<<< k, k, 0, magma_stream >>>( dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
magma_sgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>( m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
1eee6a889b9fb2707985d39e37a75350c8ef817f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/permute_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void PermuteKernel(const int nthreads,
Dtype* const bottom_data, const bool forward, const int* permute_order,
const int* old_steps, const int* new_steps, const int num_axes,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp_idx = index;
int old_idx = 0;
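    // Decompose the flattened output index along the permuted axes and rebuild
    // the corresponding input offset from the original strides.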
for (int i = 0; i < num_axes; ++i) {
int order = permute_order[i];
old_idx += (temp_idx / new_steps[i]) * old_steps[order];
temp_idx %= new_steps[i];
}
if (forward) {
top_data[index] = bottom_data[old_idx];
} else {
bottom_data[old_idx] = top_data[index];
}
}
}
template <typename Dtype>
void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (need_permute_) {
Dtype* bottom_data = bottom[0]->mutable_gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
const int* permute_order = permute_order_.gpu_data();
const int* new_steps = new_steps_.gpu_data();
const int* old_steps = old_steps_.gpu_data();
    bool forward = true;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( PermuteKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, forward, permute_order,
old_steps, new_steps, num_axes_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
// If there is no need to permute, we share data to save memory.
top[0]->ShareData(*bottom[0]);
}
}
template <typename Dtype>
void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (need_permute_) {
Dtype* top_diff = top[0]->mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
const int* permute_order = permute_order_.gpu_data();
const int* new_steps = new_steps_.gpu_data();
const int* old_steps = old_steps_.gpu_data();
    bool forward = false;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( PermuteKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_diff, forward, permute_order,
old_steps, new_steps, num_axes_, top_diff);
CUDA_POST_KERNEL_CHECK;
} else {
// If there is no need to permute, we share diff to save memory.
bottom[0]->ShareDiff(*top[0]);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer);
} // namespace caffe
|
1eee6a889b9fb2707985d39e37a75350c8ef817f.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/permute_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void PermuteKernel(const int nthreads,
Dtype* const bottom_data, const bool forward, const int* permute_order,
const int* old_steps, const int* new_steps, const int num_axes,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp_idx = index;
int old_idx = 0;
for (int i = 0; i < num_axes; ++i) {
int order = permute_order[i];
old_idx += (temp_idx / new_steps[i]) * old_steps[order];
temp_idx %= new_steps[i];
}
if (forward) {
top_data[index] = bottom_data[old_idx];
} else {
bottom_data[old_idx] = top_data[index];
}
}
}
template <typename Dtype>
void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (need_permute_) {
Dtype* bottom_data = bottom[0]->mutable_gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
const int* permute_order = permute_order_.gpu_data();
const int* new_steps = new_steps_.gpu_data();
const int* old_steps = old_steps_.gpu_data();
    bool forward = true;
    // NOLINT_NEXT_LINE(whitespace/operators)
    PermuteKernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, forward, permute_order,
old_steps, new_steps, num_axes_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
// If there is no need to permute, we share data to save memory.
top[0]->ShareData(*bottom[0]);
}
}
template <typename Dtype>
void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (need_permute_) {
Dtype* top_diff = top[0]->mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
const int* permute_order = permute_order_.gpu_data();
const int* new_steps = new_steps_.gpu_data();
const int* old_steps = old_steps_.gpu_data();
    bool forward = false;
    // NOLINT_NEXT_LINE(whitespace/operators)
    PermuteKernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_diff, forward, permute_order,
old_steps, new_steps, num_axes_, top_diff);
CUDA_POST_KERNEL_CHECK;
} else {
// If there is no need to permute, we share diff to save memory.
bottom[0]->ShareDiff(*top[0]);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer);
} // namespace caffe
|
cec7caa9ad0729158b5f489142bdcddcbd048f3c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
hipStream_t master_kernel_stream;
hipStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(hipStreamCreate(&runtime_stream));
checkCudaErrors(hipStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(hipHostMalloc(&done, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(hipHostMalloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), hipHostMallocDefault));
// device task buffer
checkCudaErrors(hipMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(hipHostMalloc(&totalScheTasks, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
//MasterKernel
hipLaunchKernelGGL(( masterKernel), dim3(BK_NUM), dim3(TD_NUM), SH_MEM_SIZE, master_kernel_stream, doneDev, totalScheTasksDev, ggTaskPool);
}
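// taskLaunch scans the host-side task pool for a free slot, fills it with the launch
// configuration and kernel arguments taken from the varargs list, and asynchronously
// copies the descriptor to the device so the persistent masterKernel can schedule it.
// Returns the index of the slot that was used.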
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1){
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type)
{
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case LONG:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, long*);
break;
default:
break;
} // End switch
} // End for paraN
checkCudaErrors(hipMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int),
hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(hipMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(hipMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(hipStreamDestroy(master_kernel_stream));
checkCudaErrors(hipStreamDestroy(runtime_stream));
checkCudaErrors(hipHostFree(done));
checkCudaErrors(hipHostFree(ccTaskPool));
checkCudaErrors(hipHostFree(totalScheTasks));
checkCudaErrors(hipFree(doneDev));
checkCudaErrors(hipFree(ggTaskPool));
checkCudaErrors(hipFree(totalScheTasksDev));
}
|
cec7caa9ad0729158b5f489142bdcddcbd048f3c.cu
|
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
cudaStream_t master_kernel_stream;
cudaStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(cudaStreamCreate(&runtime_stream));
checkCudaErrors(cudaStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(cudaHostAlloc(&done, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(cudaHostAlloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), cudaHostAllocDefault));
// device task buffer
checkCudaErrors(cudaMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(cudaHostAlloc(&totalScheTasks, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
//MasterKernel
masterKernel<<<BK_NUM, TD_NUM, SH_MEM_SIZE, master_kernel_stream>>>(doneDev, totalScheTasksDev, ggTaskPool);
}
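// taskLaunch scans the host-side task pool for a free slot, fills it with the launch
// configuration and kernel arguments taken from the varargs list, and asynchronously
// copies the descriptor to the device so the persistent masterKernel can schedule it.
// Returns the index of the slot that was used.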
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1){
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type)
{
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case LONG:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, long*);
break;
default:
break;
} // End switch
} // End for paraN
checkCudaErrors(cudaMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int),
cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(cudaMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(cudaMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(cudaStreamDestroy(master_kernel_stream));
checkCudaErrors(cudaStreamDestroy(runtime_stream));
checkCudaErrors(cudaFreeHost(done));
checkCudaErrors(cudaFreeHost(ccTaskPool));
checkCudaErrors(cudaFreeHost(totalScheTasks));
checkCudaErrors(cudaFree(doneDev));
checkCudaErrors(cudaFree(ggTaskPool));
checkCudaErrors(cudaFree(totalScheTasksDev));
}
|
5de10083f2720eeea1d0977bebe55f90117566fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "split_pairwise.cuh"
#include "split_properties_helpers.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
__forceinline__ __device__ void AddToMatrices(int row, int col, float sum,
float* matrix) {
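// The system matrix is symmetric and stored as a packed lower triangle:
// entry (row, col) with col <= row lives at offset row * (row + 1) / 2 + col,
// so (row, col) and (col, row) map to the same slot.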
const int ind = col < row ? (row * (row + 1) >> 1) + col : (col * (col + 1) >> 1) + row;
matrix[ind] += sum;
}
template <int BLOCK_SIZE, int PartCount>
__global__ void MakePairwiseDerivatives(const float* pairwiseHistogram,
int matrixOffset,
int matCount,
int histLineSize /* 4 * totalBinFeatureCount */,
float* linearSystem) {
const int logicalWarpSize = PartCount > 32 ? 32 : PartCount;
const int matricesPerBlock = BLOCK_SIZE / logicalWarpSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / logicalWarpSize;
int localTid = threadIdx.x & (logicalWarpSize - 1);
const int inBlockOffset = threadIdx.x / logicalWarpSize;
if (matrixIdx >= matCount)
return;
{
const size_t rowSize = PartCount * 2;
const size_t linearSystemSize = (rowSize + rowSize * (rowSize + 1) / 2);
linearSystem += matrixIdx * linearSystemSize;
}
pairwiseHistogram += (matrixOffset + matrixIdx) * 4;
__shared__ float lineData[BLOCK_SIZE * 2];
const int N = PartCount / logicalWarpSize;
const int logicalWarpId = threadIdx.x / logicalWarpSize;
const int logicalWarpCount = BLOCK_SIZE / logicalWarpSize;
thread_block_tile<logicalWarpSize> groupTile = tiled_partition<logicalWarpSize>(this_thread_block());
float sum0[N];
float sum1[N];
for (int i = 0; i < N; ++i) {
sum0[i] = 0;
sum1[i] = 0;
}
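// Two sweeps over the pairwise histogram fill the packed system: the first handles
// column blocks with x <= y, the second those with x > y. Off-diagonal entries receive
// the negated pair weights, while sum0/sum1 accumulate per-row totals that are added
// to the diagonal after the sweeps.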
#pragma unroll 16
for (int y = 0; y < PartCount; ++y) {
#pragma unroll
for (int i = 0; i < N; ++i) {
const int x = localTid + 32 * i;
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
const float w00 = (x != y ? hist.x : 0.0f);
const float w01 = hist.y;
const float w10 = hist.z;
const float w11 = (x != y ? hist.w : 0.0f);
// sync for row write done in reduce if we need it
const int nextRow = 2 * y;
const int nextCol = 2 * x;
sum0[i] += w00 + w10;
sum1[i] += w01 + w11;
if (x == y) {
AddToMatrices(nextRow + 1, nextRow, -(w01 + w10), linearSystem);
} else if (x < y) {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
groupTile.sync();
}
groupTile.sync();
}
#pragma unroll 16
for (int x = 0; x < PartCount; ++x) {
#pragma unroll
for (int i = 0; i < N; ++i) {
const int y = localTid + 32 * i;
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
const float w00 = (x != y ? hist.x : 0.0f);
const float w01 = hist.y;
const float w10 = hist.z;
const float w11 = (x != y ? hist.w : 0.0f);
// sync for row write done in reduce if we need it
const int nextRow = 2 * y;
const int nextCol = 2 * x;
sum0[i] += w01 + w00;
sum1[i] += w10 + w11;
if (x > y) {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
groupTile.sync();
}
groupTile.sync();
}
#pragma unroll
for (int i = 0; i < N; ++i) {
const int x = localTid + 32 * i;
const int nextRow = 2 * x;
linearSystem[nextRow * (nextRow + 1) / 2 + nextRow] += sum0[i];
linearSystem[(nextRow + 1) * (nextRow + 2) / 2 + nextRow + 1] += sum1[i];
}
}
template <int BLOCK_SIZE>
void RunMakeMatrices(const float* histogram, int partCount, int histLineSize, int firstMatrix, int matricesCount, float* linearSystem, TCudaStream stream) {
if (matricesCount > 0) {
const int numBlocks = (((size_t) matricesCount) * min(partCount, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
#define RUN(PartCount)\
MakePairwiseDerivatives<BLOCK_SIZE, PartCount> << < numBlocks, BLOCK_SIZE, 0, stream >> > (histogram, firstMatrix, matricesCount, histLineSize, linearSystem);
if (partCount == 1) {
RUN(1)
} else if (partCount == 2) {
RUN(2)
} else if (partCount == 4) {
RUN(4)
} else if (partCount == 8) {
RUN(8)
} else if (partCount == 16) {
RUN(16)
} else if (partCount == 32) {
RUN(32)
} else if (partCount == 64) {
RUN(64)
} else if (partCount == 128) {
RUN(128)
} else {
exit(0);
}
}
}
void MakePairwiseDerivatives(const float* histogram, int leavesCount, int firstMatrix, int matricesCount, int histLineSize, float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2 && (leavesCount <= 64)) {
RunMakeMatrices<192>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
} else {
RunMakeMatrices<256>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
}
}
template <int BLOCK_SIZE>
__global__ void MakePointwiseDerivatives(const float* pointwiseHist, ui64 pointwiseHistSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matCount,
float* linearSystem) {
const int lineSize = min(rowSize, 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / lineSize;
pointwiseHist += (firstMatrixIdx + matrixIdx) * (hasPointwiseWeights ? 2 : 1);
linearSystem += ((size_t)matrixIdx) * (rowSize + rowSize * (rowSize + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
float* targets = linearSystem + rowSize * (rowSize + 1) / 2;
if (matrixIdx < matCount) {
for (int col = x; col < rowSize; col += 32) {
const int i = col / 2;
ui64 offset = pointwiseHistSize * i;
if (hasPointwiseWeights) {
const float leafWeight = pointwiseHist[offset];
const float weight = (col & 1) ? partStats[i].Weight - leafWeight : leafWeight;
linearSystem[col * (col + 1) / 2 + col] += max(weight, 0.0f);
}
const float leafSum = pointwiseHist[offset + hasPointwiseWeights];
const float sum = (col & 1) ? partStats[i].Sum - leafSum : leafSum;
targets[col] = sum;
}
}
}
template <int BLOCK_SIZE>
void RunMakePointwiseDerivatives(const float* pointwiseHist, int binFeatureCount,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream
) {
if (matricesCount > 0) {
const ui32 pointwiseHistSize = binFeatureCount * (hasPointwiseWeights ? 2 : 1);
const int lineSize = min(32, rowSize);
const int numBlocks = (((size_t) matricesCount) * lineSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePointwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> > (pointwiseHist, pointwiseHistSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem);
}
}
void MakePointwiseDerivatives(const float* pointwiseHist, int pointwiseHistLineSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2) {
RunMakePointwiseDerivatives<192> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
} else {
RunMakePointwiseDerivatives<128> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
}
}
__global__ void UpdateBinsPairs(TCFeature feature, ui32 binIdx,
const ui32* cindex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins) {
ui32 idx = blockIdx.x * blockDim.x + threadIdx.x;
cindex += feature.Offset;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
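// Grid-stride loop over all pairs: decide on which side of the split each member of the
// pair falls (equality test for one-hot features, threshold test otherwise) and pack the
// two resulting bits into the pair's bin at the current depth (two bits per tree level).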
while (idx < pairCount) {
const uint2 p = pairs[idx];
const ui32 d1 = (cindex[p.x] & mask);
const ui32 d2 = (cindex[p.y] & mask);
ui32 bit1 = feature.OneHotFeature ? d1 == value : d1 > value;
ui32 bit2 = feature.OneHotFeature ? d2 == value : d2 > value;
ui32 bin = bins[idx];
bin = ((bit1 * 2 + bit2) << (depth * 2)) | bin;
bins[idx] = bin;
idx += blockDim.x * gridDim.x;
}
}
void UpdateBinsPairs(TCFeature feature, ui32 bin,
const ui32* compressedIndex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = min((pairCount + blockSize - 1) / blockSize,
TArchProps::MaxBlockCount());
hipLaunchKernelGGL(( UpdateBinsPairs), dim3(numBlocks), dim3(blockSize), 0, stream, feature, bin, compressedIndex, pairs, pairCount, depth, bins);
}
template <int BLOCK_SIZE>
__global__ void SelectBestSplitImpl(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best) {
float maxScore = -5000000.0f;
int maxIdx = -1;
int tid = threadIdx.x;
#pragma unroll 8
for (int i = tid; i < size; i += BLOCK_SIZE) {
float score = scores[i];
if (score > maxScore) {
maxScore = score;
maxIdx = i;
}
}
__shared__ float vals[BLOCK_SIZE];
__shared__ int inds[BLOCK_SIZE];
vals[tid] = maxScore;
inds[tid] = maxIdx;
__syncthreads();
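// Tree reduction in shared memory: at each step the first half of the active threads
// keeps the entry with the larger score (ties resolved toward the smaller index).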
for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
if (tid < s) {
if ( vals[tid] < vals[tid + s] || (vals[tid] == vals[tid + s] && inds[tid] > inds[tid + s]) ) {
vals[tid] = vals[tid + s];
inds[tid] = inds[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
TCBinFeature bestFeature;
const int bestIdx = inds[0];
const float bestScore = vals[0];
if (bestIdx != -1) {
bestFeature = binFeature[bestIdx];
} else {
bestFeature.BinId = 0;
bestFeature.FeatureId = 0;
}
best->Index = bestIndexBias + bestIdx;
best->Score = -bestScore;
best->BinId = bestFeature.BinId;
best->FeatureId = bestFeature.FeatureId;
}
}
void SelectBestSplit(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best,
TCudaStream stream) {
const int blockSize = 1024;
hipLaunchKernelGGL(( SelectBestSplitImpl<blockSize>), dim3(1), dim3(blockSize), 0, stream, scores, binFeature, size, bestIndexBias, best);
}
__global__ void ZeroSameLeafBinWeightsImpl(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
if (binx == biny) {
pairWeights[i] = 0;
}
}
}
void ZeroSameLeafBinWeights(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights,
TCudaStream stream
) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( ZeroSameLeafBinWeightsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, bins, pairCount, pairWeights);
}
}
__global__ void FillPairBinsImpl(const uint2* pairs,
const ui32* bins,
ui32 rowSize,
ui32 pairCount,
ui32* pairBins) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
pairBins[i] = binx * rowSize + biny;
}
}
void FillPairBins(const uint2* pairs,
const ui32* bins,
ui32 binCount,
ui32 pairCount,
ui32* pairBins,
TCudaStream stream) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( FillPairBinsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, bins, binCount, pairCount, pairBins);
}
}
//for leaves estimation
__global__ void FillPairDer2OnlyImpl(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
}
}
void FillPairDer2Only(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( FillPairDer2OnlyImpl), dim3(numBlocks), dim3(blockSize), 0, stream , ders2, groupDers2, qids, pairs, pairCount, pairDer2);
}
}
}
|
5de10083f2720eeea1d0977bebe55f90117566fc.cu
|
#include "split_pairwise.cuh"
#include "split_properties_helpers.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
__forceinline__ __device__ void AddToMatrices(int row, int col, float sum,
float* matrix) {
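// The system matrix is symmetric and stored as a packed lower triangle:
// entry (row, col) with col <= row lives at offset row * (row + 1) / 2 + col,
// so (row, col) and (col, row) map to the same slot.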
const int ind = col < row ? (row * (row + 1) >> 1) + col : (col * (col + 1) >> 1) + row;
matrix[ind] += sum;
}
template <int BLOCK_SIZE, int PartCount>
__global__ void MakePairwiseDerivatives(const float* pairwiseHistogram,
int matrixOffset,
int matCount,
int histLineSize /* 4 * totalBinFeatureCount */,
float* linearSystem) {
const int logicalWarpSize = PartCount > 32 ? 32 : PartCount;
const int matricesPerBlock = BLOCK_SIZE / logicalWarpSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / logicalWarpSize;
int localTid = threadIdx.x & (logicalWarpSize - 1);
const int inBlockOffset = threadIdx.x / logicalWarpSize;
if (matrixIdx >= matCount)
return;
{
const size_t rowSize = PartCount * 2;
const size_t linearSystemSize = (rowSize + rowSize * (rowSize + 1) / 2);
linearSystem += matrixIdx * linearSystemSize;
}
pairwiseHistogram += (matrixOffset + matrixIdx) * 4;
__shared__ float lineData[BLOCK_SIZE * 2];
const int N = PartCount / logicalWarpSize;
const int logicalWarpId = threadIdx.x / logicalWarpSize;
const int logicalWarpCount = BLOCK_SIZE / logicalWarpSize;
thread_block_tile<logicalWarpSize> groupTile = tiled_partition<logicalWarpSize>(this_thread_block());
float sum0[N];
float sum1[N];
for (int i = 0; i < N; ++i) {
sum0[i] = 0;
sum1[i] = 0;
}
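// Two sweeps over the pairwise histogram fill the packed system: the first handles
// column blocks with x <= y, the second those with x > y. Off-diagonal entries receive
// the negated pair weights, while sum0/sum1 accumulate per-row totals that are added
// to the diagonal after the sweeps.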
#pragma unroll 16
for (int y = 0; y < PartCount; ++y) {
#pragma unroll
for (int i = 0; i < N; ++i) {
const int x = localTid + 32 * i;
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
const float w00 = (x != y ? hist.x : 0.0f);
const float w01 = hist.y;
const float w10 = hist.z;
const float w11 = (x != y ? hist.w : 0.0f);
// sync for row write done in reduce if we need it
const int nextRow = 2 * y;
const int nextCol = 2 * x;
sum0[i] += w00 + w10;
sum1[i] += w01 + w11;
if (x == y) {
AddToMatrices(nextRow + 1, nextRow, -(w01 + w10), linearSystem);
} else if (x < y) {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
groupTile.sync();
}
groupTile.sync();
}
#pragma unroll 16
for (int x = 0; x < PartCount; ++x) {
#pragma unroll
for (int i = 0; i < N; ++i) {
const int y = localTid + 32 * i;
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
const float w00 = (x != y ? hist.x : 0.0f);
const float w01 = hist.y;
const float w10 = hist.z;
const float w11 = (x != y ? hist.w : 0.0f);
// sync for row write done in reduce if we need it
const int nextRow = 2 * y;
const int nextCol = 2 * x;
sum0[i] += w01 + w00;
sum1[i] += w10 + w11;
if (x > y) {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
groupTile.sync();
}
groupTile.sync();
}
#pragma unroll
for (int i = 0; i < N; ++i) {
const int x = localTid + 32 * i;
const int nextRow = 2 * x;
linearSystem[nextRow * (nextRow + 1) / 2 + nextRow] += sum0[i];
linearSystem[(nextRow + 1) * (nextRow + 2) / 2 + nextRow + 1] += sum1[i];
}
}
template <int BLOCK_SIZE>
void RunMakeMatrices(const float* histogram, int partCount, int histLineSize, int firstMatrix, int matricesCount, float* linearSystem, TCudaStream stream) {
if (matricesCount > 0) {
const int numBlocks = (((size_t) matricesCount) * min(partCount, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
#define RUN(PartCount)\
MakePairwiseDerivatives<BLOCK_SIZE, PartCount> << < numBlocks, BLOCK_SIZE, 0, stream >> > (histogram, firstMatrix, matricesCount, histLineSize, linearSystem);
if (partCount == 1) {
RUN(1)
} else if (partCount == 2) {
RUN(2)
} else if (partCount == 4) {
RUN(4)
} else if (partCount == 8) {
RUN(8)
} else if (partCount == 16) {
RUN(16)
} else if (partCount == 32) {
RUN(32)
} else if (partCount == 64) {
RUN(64)
} else if (partCount == 128) {
RUN(128)
} else {
exit(0);
}
}
}
void MakePairwiseDerivatives(const float* histogram, int leavesCount, int firstMatrix, int matricesCount, int histLineSize, float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2 && (leavesCount <= 64)) {
RunMakeMatrices<192>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
} else {
RunMakeMatrices<256>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
}
}
template <int BLOCK_SIZE>
__global__ void MakePointwiseDerivatives(const float* pointwiseHist, ui64 pointwiseHistSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matCount,
float* linearSystem) {
const int lineSize = min(rowSize, 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / lineSize;
pointwiseHist += (firstMatrixIdx + matrixIdx) * (hasPointwiseWeights ? 2 : 1);
linearSystem += ((size_t)matrixIdx) * (rowSize + rowSize * (rowSize + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
float* targets = linearSystem + rowSize * (rowSize + 1) / 2;
if (matrixIdx < matCount) {
for (int col = x; col < rowSize; col += 32) {
const int i = col / 2;
ui64 offset = pointwiseHistSize * i;
if (hasPointwiseWeights) {
const float leafWeight = pointwiseHist[offset];
const float weight = (col & 1) ? partStats[i].Weight - leafWeight : leafWeight;
linearSystem[col * (col + 1) / 2 + col] += max(weight, 0.0f);
}
const float leafSum = pointwiseHist[offset + hasPointwiseWeights];
const float sum = (col & 1) ? partStats[i].Sum - leafSum : leafSum;
targets[col] = sum;
}
}
}
template <int BLOCK_SIZE>
void RunMakePointwiseDerivatives(const float* pointwiseHist, int binFeatureCount,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream
) {
if (matricesCount > 0) {
const ui32 pointwiseHistSize = binFeatureCount * (hasPointwiseWeights ? 2 : 1);
const int lineSize = min(32, rowSize);
const int numBlocks = (((size_t) matricesCount) * lineSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePointwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> > (pointwiseHist, pointwiseHistSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem);
}
}
void MakePointwiseDerivatives(const float* pointwiseHist, int pointwiseHistLineSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2) {
RunMakePointwiseDerivatives<192> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
} else {
RunMakePointwiseDerivatives<128> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
}
}
__global__ void UpdateBinsPairs(TCFeature feature, ui32 binIdx,
const ui32* cindex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins) {
ui32 idx = blockIdx.x * blockDim.x + threadIdx.x;
cindex += feature.Offset;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
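// Grid-stride loop over all pairs: decide on which side of the split each member of the
// pair falls (equality test for one-hot features, threshold test otherwise) and pack the
// two resulting bits into the pair's bin at the current depth (two bits per tree level).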
while (idx < pairCount) {
const uint2 p = pairs[idx];
const ui32 d1 = (cindex[p.x] & mask);
const ui32 d2 = (cindex[p.y] & mask);
ui32 bit1 = feature.OneHotFeature ? d1 == value : d1 > value;
ui32 bit2 = feature.OneHotFeature ? d2 == value : d2 > value;
ui32 bin = bins[idx];
bin = ((bit1 * 2 + bit2) << (depth * 2)) | bin;
bins[idx] = bin;
idx += blockDim.x * gridDim.x;
}
}
void UpdateBinsPairs(TCFeature feature, ui32 bin,
const ui32* compressedIndex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = min((pairCount + blockSize - 1) / blockSize,
TArchProps::MaxBlockCount());
UpdateBinsPairs<<<numBlocks, blockSize, 0, stream>>>(feature, bin, compressedIndex, pairs, pairCount, depth, bins);
}
template <int BLOCK_SIZE>
__global__ void SelectBestSplitImpl(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best) {
float maxScore = -5000000.0f;
int maxIdx = -1;
int tid = threadIdx.x;
#pragma unroll 8
for (int i = tid; i < size; i += BLOCK_SIZE) {
float score = scores[i];
if (score > maxScore) {
maxScore = score;
maxIdx = i;
}
}
__shared__ float vals[BLOCK_SIZE];
__shared__ int inds[BLOCK_SIZE];
vals[tid] = maxScore;
inds[tid] = maxIdx;
__syncthreads();
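// Tree reduction in shared memory: at each step the first half of the active threads
// keeps the entry with the larger score (ties resolved toward the smaller index).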
for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
if (tid < s) {
if ( vals[tid] < vals[tid + s] || (vals[tid] == vals[tid + s] && inds[tid] > inds[tid + s]) ) {
vals[tid] = vals[tid + s];
inds[tid] = inds[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
TCBinFeature bestFeature;
const int bestIdx = inds[0];
const float bestScore = vals[0];
if (bestIdx != -1) {
bestFeature = binFeature[bestIdx];
} else {
bestFeature.BinId = 0;
bestFeature.FeatureId = 0;
}
best->Index = bestIndexBias + bestIdx;
best->Score = -bestScore;
best->BinId = bestFeature.BinId;
best->FeatureId = bestFeature.FeatureId;
}
}
void SelectBestSplit(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best,
TCudaStream stream) {
const int blockSize = 1024;
SelectBestSplitImpl<blockSize><<<1, blockSize, 0, stream>>>(scores, binFeature, size, bestIndexBias, best);
}
__global__ void ZeroSameLeafBinWeightsImpl(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
if (binx == biny) {
pairWeights[i] = 0;
}
}
}
void ZeroSameLeafBinWeights(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights,
TCudaStream stream
) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
ZeroSameLeafBinWeightsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, bins, pairCount, pairWeights);
}
}
__global__ void FillPairBinsImpl(const uint2* pairs,
const ui32* bins,
ui32 rowSize,
ui32 pairCount,
ui32* pairBins) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
pairBins[i] = binx * rowSize + biny;
}
}
void FillPairBins(const uint2* pairs,
const ui32* bins,
ui32 binCount,
ui32 pairCount,
ui32* pairBins,
TCudaStream stream) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
FillPairBinsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, bins, binCount, pairCount, pairBins);
}
}
//for leaves estimation
__global__ void FillPairDer2OnlyImpl(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
}
}
void FillPairDer2Only(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
FillPairDer2OnlyImpl<<< numBlocks, blockSize, 0, stream >>>(ders2, groupDers2, qids, pairs, pairCount, pairDer2);
}
}
}
|
abc9f4302e9ae4873b89b787fa3beb999e89b111.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdint>
#include <iostream>
#include "helpers.cuh"
#include "encryption.cuh"
// Host function.
void encrypt_cpu(uint64_t * data, uint64_t num_entries,
uint64_t num_iters, bool parallel=true) {
// Use OpenMP to use all available CPU cores.
#pragma omp parallel for if (parallel)
for (uint64_t entry = 0; entry < num_entries; entry++)
// Permute each data entry the number of iterations and then write result to data.
data[entry] = permute64(entry, num_iters);
}
// Device function.
__global__
void decrypt_gpu(uint64_t * data, uint64_t num_entries,
uint64_t num_iters) {
const uint64_t thrdID = blockIdx.x*blockDim.x+threadIdx.x;
const uint64_t stride = blockDim.x*gridDim.x;
//printf(" checkpoint i0\n");
// Utilize grid-stride loop for arbitrary data sizes.
for (uint64_t entry = thrdID; entry < num_entries; entry += stride)
// Unpermute each data entry the number of iterations then write result to data.
data[entry] = unpermute64(data[entry], num_iters);
//printf(" checkpoint i1\n");
}
// Host function.
bool check_result_cpu(uint64_t * data, uint64_t num_entries,
bool parallel=true) {
uint64_t counter = 0;
#pragma omp parallel for reduction(+: counter) if (parallel)
for (uint64_t entry = 0; entry < num_entries; entry++)
// Because we created initial data values by ranging from 0 to N-1,
// and because encrypting and decrypting is symmetrical,
// then each data entry should be equal to `entry`.
counter += data[entry] == entry;
// True if all values have been correctly decrypted.
return counter == num_entries;
}
int main (int argc, char * argv[]) {
// This file will be used to cache encryption results
// so we don't have to wait on the CPU every time.
//const char * encrypted_file = "/dli/task/encrypted";
const char * encrypted_file = "hello";
//"/home/babak/Codes/Learning/HPC/2_Cuda/6_Accelerating_CUDA_C++_Applications_with_Concurrent_Streams/3_Application/hello";
// Timer instance to be used for sections of the application.
Timer timer;
// Timer instance to be used for total time on the GPU(s).
Timer overall;
const uint64_t num_entries = 1UL << 26;
const uint64_t num_iters = 1UL << 10;
// Use all available CPUs in parallel for host calculations.
//const bool openmp = true;
const bool openmp = false;
// This timer start and then stop pattern will be used throughout the application.
timer.start();
uint64_t * data_cpu, * data_gpu;
// hipHostMalloc will be discussed at length later in the course.
hipHostMalloc(&data_cpu, sizeof(uint64_t)*num_entries);
hipMalloc (&data_gpu, sizeof(uint64_t)*num_entries);
timer.stop("allocate memory");
check_last_error();
timer.start();
// If encryption cache file does not exist...
if (!encrypted_file_exists(encrypted_file)) {
// ...encrypt data in parallel on CPU...
std::cout << " encrypting... \n ";
encrypt_cpu(data_cpu, num_entries, num_iters, openmp);
// ...and make encryption cache file for later.
write_encrypted_to_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries);
} else {
std::cout << " reading... \n ";
// Use encryption cache file if it exists.
read_encrypted_from_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries);
}
timer.stop("encrypt data on CPU");
// Begin timing for total time on GPU(s).
overall.start();
timer.start();
hipStream_t str;
hipStreamCreate(&str);
// Data copy from CPU to GPU.
hipMemcpyAsync(data_gpu, data_cpu,
sizeof(uint64_t)*num_entries, hipMemcpyHostToDevice, str);
timer.stop("copy data from CPU to GPU");
check_last_error();
// non-default stream
timer.start();
// Decrypt data on GPU(s).
hipLaunchKernelGGL(( decrypt_gpu), dim3(80*32), dim3(64), 0, 0, data_gpu, num_entries, num_iters);
timer.stop("decrypt data on GPU");
//std::cout << " checkpoint 0\n";
check_last_error();
//std::cout << " checkpoint 1\n";
timer.start();
// Copy data from GPU to CPU.
hipMemcpyAsync(data_cpu, data_gpu,
sizeof(uint64_t)*num_entries, hipMemcpyDeviceToHost, str);
// Wait for memory transfer to complete before proceeding.
hipStreamSynchronize(str);
hipStreamDestroy(str);
timer.stop("copy data from GPU to CPU");
// Stop timer for total time on GPU(s).
overall.stop("total time on GPU");
check_last_error();
timer.start();
// Check results on CPU.
const bool success = check_result_cpu(data_cpu, num_entries, openmp);
std::cout << "STATUS: test "
<< ( success ? "passed" : "failed")
<< std::endl;
timer.stop("checking result on CPU");
timer.start();
// Free memory.
hipHostFree(data_cpu);
hipFree (data_gpu);
timer.stop("free memory");
check_last_error();
}
|
abc9f4302e9ae4873b89b787fa3beb999e89b111.cu
|
#include <cstdint>
#include <iostream>
#include "helpers.cuh"
#include "encryption.cuh"
// Host function.
void encrypt_cpu(uint64_t * data, uint64_t num_entries,
uint64_t num_iters, bool parallel=true) {
// Use OpenMP to use all available CPU cores.
#pragma omp parallel for if (parallel)
for (uint64_t entry = 0; entry < num_entries; entry++)
// Permute each data entry the number of iterations and then write result to data.
data[entry] = permute64(entry, num_iters);
}
// Device function.
__global__
void decrypt_gpu(uint64_t * data, uint64_t num_entries,
uint64_t num_iters) {
const uint64_t thrdID = blockIdx.x*blockDim.x+threadIdx.x;
const uint64_t stride = blockDim.x*gridDim.x;
//printf(" checkpoint i0\n");
// Utilize grid-stride loop for arbitrary data sizes.
for (uint64_t entry = thrdID; entry < num_entries; entry += stride)
// Unpermute each data entry the number of iterations then write result to data.
data[entry] = unpermute64(data[entry], num_iters);
//printf(" checkpoint i1\n");
}
// Host function.
bool check_result_cpu(uint64_t * data, uint64_t num_entries,
bool parallel=true) {
uint64_t counter = 0;
#pragma omp parallel for reduction(+: counter) if (parallel)
for (uint64_t entry = 0; entry < num_entries; entry++)
// Because we created initial data values by ranging from 0 to N-1,
// and because encrypting and decrypting is symmetrical,
// then each data entry should be equal to `entry`.
counter += data[entry] == entry;
// True if all values have been correctly decrypted.
return counter == num_entries;
}
int main (int argc, char * argv[]) {
// This file will be used to cache encryption results
// so we don't have to wait on the CPU every time.
//const char * encrypted_file = "/dli/task/encrypted";
const char * encrypted_file = "hello";
//"/home/babak/Codes/Learning/HPC/2_Cuda/6_Accelerating_CUDA_C++_Applications_with_Concurrent_Streams/3_Application/hello";
// Timer instance to be used for sections of the application.
Timer timer;
// Timer instance to be used for total time on the GPU(s).
Timer overall;
const uint64_t num_entries = 1UL << 26;
const uint64_t num_iters = 1UL << 10;
// Use all available CPUs in parallel for host calculations.
//const bool openmp = true;
const bool openmp = false;
// This timer start and then stop pattern will be used throughout the application.
timer.start();
uint64_t * data_cpu, * data_gpu;
// cudaMallocHost will be discussed at length later in the course.
cudaMallocHost(&data_cpu, sizeof(uint64_t)*num_entries);
cudaMalloc (&data_gpu, sizeof(uint64_t)*num_entries);
timer.stop("allocate memory");
check_last_error();
timer.start();
// If encryption cache file does not exist...
if (!encrypted_file_exists(encrypted_file)) {
// ...encrypt data in parallel on CPU...
std::cout << " encrypting... \n ";
encrypt_cpu(data_cpu, num_entries, num_iters, openmp);
// ...and make encryption cache file for later.
write_encrypted_to_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries);
} else {
std::cout << " reading... \n ";
// Use encryption cache file if it exists.
read_encrypted_from_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries);
}
timer.stop("encrypt data on CPU");
// Begin timing for total time on GPU(s).
overall.start();
timer.start();
cudaStream_t str;
cudaStreamCreate(&str);
// Data copy from CPU to GPU.
cudaMemcpyAsync(data_gpu, data_cpu,
sizeof(uint64_t)*num_entries, cudaMemcpyHostToDevice, str);
timer.stop("copy data from CPU to GPU");
check_last_error();
// non-default stream
timer.start();
// Decrypt data on GPU(s).
decrypt_gpu<<<80*32, 64>>>(data_gpu, num_entries, num_iters);
timer.stop("decrypt data on GPU");
//std::cout << " checkpoint 0\n";
check_last_error();
//std::cout << " checkpoint 1\n";
timer.start();
// Copy data from GPU to CPU.
cudaMemcpyAsync(data_cpu, data_gpu,
sizeof(uint64_t)*num_entries, cudaMemcpyDeviceToHost, str);
// Wait for memory transfer to complete before proceeding.
cudaStreamSynchronize(str);
cudaStreamDestroy(str);
timer.stop("copy data from GPU to CPU");
// Stop timer for total time on GPU(s).
overall.stop("total time on GPU");
check_last_error();
timer.start();
// Check results on CPU.
const bool success = check_result_cpu(data_cpu, num_entries, openmp);
std::cout << "STATUS: test "
<< ( success ? "passed" : "failed")
<< std::endl;
timer.stop("checking result on CPU");
timer.start();
// Free memory.
cudaFreeHost(data_cpu);
cudaFree (data_gpu);
timer.stop("free memory");
check_last_error();
}
|
15c3a029d00b3d774e8e319d1c55a2482cd21fa5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <iomanip>
#define BLOCK_SIZE 16
void print_matrices(float* matrix, char* file_Name, int x_dim, int y_dim, int dim)
{
std::ofstream outFile;
outFile.open(file_Name);
outFile << std::fixed;
outFile << std::setprecision(2);
for (int i = 0; i < x_dim; i++) {
for (int j = 0; j < y_dim; j++) {
outFile << matrix[i * dim + j] << " ";
}
outFile << std::endl;
}
}
__host__ void cpu_matrix_mult(float* h_a, float* h_b, float* h_result, int m) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float tmp = 0.0;
for (int h = 0; h < m; ++h)
{
tmp += h_a[i * m + h] * h_b[h * m + j];
}
h_result[i * m + j] = tmp;
}
}
}
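// Pads both operands to a common square dimension that is a multiple of BLOCK_SIZE
// (unused entries stay zero), fills them with sin/cos test values, and returns the
// padded dimension used by both the GPU and CPU multiplications.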
__host__ int fill(float** Lmatrix, float** Rmatrix, int LdimX, int LdimY, int RdimX, int RdimY) {
int sqr_dim_X, sqr_dim_Y, size;
sqr_dim_X = RdimX;
if (LdimX > RdimX) {
sqr_dim_X = LdimX;
}
sqr_dim_Y = RdimY;
if (LdimY > RdimY) {
sqr_dim_Y = LdimY;
}
size = sqr_dim_Y;
if (sqr_dim_X > sqr_dim_Y) {
size = sqr_dim_X;
}
int temp = size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1);
size = temp * BLOCK_SIZE;
size_t pt_size = size * size * sizeof(float);
*Lmatrix = (float*)malloc(pt_size);
*Rmatrix = (float*)malloc(pt_size);
memset(*Lmatrix, 0, pt_size);
memset(*Rmatrix, 0, pt_size);
for (int i = 0; i < LdimX; i++) {
for (int j = 0; j < LdimY; j++) {
int dummy = size * i + j;
(*Lmatrix)[dummy] = sinf(dummy);
}
}
for (int i = 0; i < RdimX; i++) {
for (int j = 0; j < RdimY; j++) {
int dummy = size * i + j;
(*Rmatrix)[dummy] = cosf(dummy);
}
}
return size;
}
__global__ void multiply(float* left, float* right, float* res, int dim) {
int i, j;
float temp = 0;
__shared__ float Left_shared_t[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
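// Walk the tiles along the shared dimension: each iteration stages one BLOCK_SIZE x BLOCK_SIZE
// tile of 'left' and of 'right' in shared memory, then accumulates that tile's contribution
// to this thread's output element.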
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x];
}
__syncthreads();
}
res[row * dim + col] = temp;
}
int main(void)
{
int Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y, Left_vector_size, Right_vector_size;
float* Left_Vector_h, * Right_Vector_h, * Left_Vector_d, * Right_Vector_d, * Res_h, * Res_d, * CPU;
printf("Enter m n n k :\n");
scanf("%d %d %d %d", &Left_matrix_x, &Left_matrix_y, &Right_matrix_x, &Right_matrix_y);
int dim = fill(&Left_Vector_h, &Right_Vector_h, Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y);
print_matrices(Left_Vector_h, "Input_LHS", Left_matrix_x, Left_matrix_y, dim);
print_matrices(Right_Vector_h, "Input_RHS", Right_matrix_x, Right_matrix_y, dim);
size_t vector_size;
vector_size = dim * dim * sizeof(float);
Res_h = (float*)malloc(vector_size); // Allocate array on host for result
CPU = (float*)malloc(vector_size);// Allocate array on host for CPU_matrix_multiplication result
hipMalloc((void**)&Left_Vector_d, vector_size); // Allocate array on device for LHS operand
hipMalloc((void**)&Right_Vector_d, vector_size); // Allocate array on device for RHS operand but this is vector 1xN
hipMalloc((void**)&Res_d, vector_size); // Allocate array on device for result
hipMemcpy(Left_Vector_d, Left_Vector_h, vector_size, hipMemcpyHostToDevice); // copy values to device
hipMemcpy(Right_Vector_d, Right_Vector_h, vector_size, hipMemcpyHostToDevice); // copy values to device
//Block dimension is directly from block_size
dim3 Block_dim(BLOCK_SIZE, BLOCK_SIZE);
//Grid dimension is found by dividing matrix dimension to block_size
dim3 Grid_dim(dim / BLOCK_SIZE, dim / BLOCK_SIZE);
//commented out the functions which helps to calculate time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//kernel call
multiply << < Grid_dim, Block_dim >> > (Left_Vector_d, Right_Vector_d, Res_d, dim);
//commented out the functions which helps to calculate time
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float et;
hipEventElapsedTime(&et, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
// Retrieve result from device and store it in host array
hipMemcpy(Res_h, Res_d, vector_size, hipMemcpyDeviceToHost);
clock_t begin = clock();
cpu_matrix_mult(Left_Vector_h, Right_Vector_h, CPU, dim); //matrix multiplication on cpu
clock_t end = clock();
double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC;
//commented out the functions which helps to calculate time
printf("GPU time= %f ms\n", et);
printf("CPU time= %lf ms\n", time_spent);
printf("Times Speed up= %f\n", time_spent / et);
//Prints the results
print_matrices(Res_h, "GPU_out", Left_matrix_x, Right_matrix_y, dim);
print_matrices(CPU, "CPU_out", Left_matrix_x, Right_matrix_y, dim);
bool equal = true;
for (int i = 0; i < Left_matrix_x && equal; i++) {
for (int j = 0; j < Right_matrix_y && equal; j++) {
if (abs(Res_h[i * dim + j] - CPU[i * dim + j]) > 0.001)
{
equal = false;
printf("NOT EQUAL\n");
}
}
}
if (equal)
{
std::cout << "Results are equal!" << std::endl;
}
else
{
std::cout << "Results are NOT equal!" << std::endl;
}
// Cleanup
free(Left_Vector_h);
free(Right_Vector_h);
free(Res_h);
free(CPU);
hipFree(Left_Vector_d);
hipFree(Right_Vector_d);
hipFree(Res_d);
}
|
15c3a029d00b3d774e8e319d1c55a2482cd21fa5.cu
|
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <iomanip>
#define BLOCK_SIZE 16
void print_matrices(float* matrix, char* file_Name, int x_dim, int y_dim, int dim)
{
std::ofstream outFile;
outFile.open(file_Name);
outFile << std::fixed;
outFile << std::setprecision(2);
for (int i = 0; i < x_dim; i++) {
for (int j = 0; j < y_dim; j++) {
outFile << matrix[i * dim + j] << " ";
}
outFile << std::endl;
}
}
__host__ void cpu_matrix_mult(float* h_a, float* h_b, float* h_result, int m) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float tmp = 0.0;
for (int h = 0; h < m; ++h)
{
tmp += h_a[i * m + h] * h_b[h * m + j];
}
h_result[i * m + j] = tmp;
}
}
}
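// Pads both operands to a common square dimension that is a multiple of BLOCK_SIZE
// (unused entries stay zero), fills them with sin/cos test values, and returns the
// padded dimension used by both the GPU and CPU multiplications.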
__host__ int fill(float** Lmatrix, float** Rmatrix, int LdimX, int LdimY, int RdimX, int RdimY) {
int sqr_dim_X, sqr_dim_Y, size;
sqr_dim_X = RdimX;
if (LdimX > RdimX) {
sqr_dim_X = LdimX;
}
sqr_dim_Y = RdimY;
if (LdimY > RdimY) {
sqr_dim_Y = LdimY;
}
size = sqr_dim_Y;
if (sqr_dim_X > sqr_dim_Y) {
size = sqr_dim_X;
}
int temp = size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1);
size = temp * BLOCK_SIZE;
size_t pt_size = size * size * sizeof(float);
*Lmatrix = (float*)malloc(pt_size);
*Rmatrix = (float*)malloc(pt_size);
memset(*Lmatrix, 0, pt_size);
memset(*Rmatrix, 0, pt_size);
for (int i = 0; i < LdimX; i++) {
for (int j = 0; j < LdimY; j++) {
int dummy = size * i + j;
(*Lmatrix)[dummy] = sinf(dummy);
}
}
for (int i = 0; i < RdimX; i++) {
for (int j = 0; j < RdimY; j++) {
int dummy = size * i + j;
(*Rmatrix)[dummy] = cosf(dummy);
}
}
return size;
}
__global__ void multiply(float* left, float* right, float* res, int dim) {
int i, j;
float temp = 0;
__shared__ float Left_shared_t[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
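// Walk the tiles along the shared dimension: each iteration stages one BLOCK_SIZE x BLOCK_SIZE
// tile of 'left' and of 'right' in shared memory, then accumulates that tile's contribution
// to this thread's output element.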
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x];
}
__syncthreads();
}
res[row * dim + col] = temp;
}
int main(void)
{
int Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y, Left_vector_size, Right_vector_size;
float* Left_Vector_h, * Right_Vector_h, * Left_Vector_d, * Right_Vector_d, * Res_h, * Res_d, * CPU;
printf("Enter m n n k :\n");
scanf("%d %d %d %d", &Left_matrix_x, &Left_matrix_y, &Right_matrix_x, &Right_matrix_y);
int dim = fill(&Left_Vector_h, &Right_Vector_h, Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y);
print_matrices(Left_Vector_h, "Input_LHS", Left_matrix_x, Left_matrix_y, dim);
print_matrices(Right_Vector_h, "Input_RHS", Right_matrix_x, Right_matrix_y, dim);
size_t vector_size;
vector_size = dim * dim * sizeof(float);
Res_h = (float*)malloc(vector_size); // Allocate array on host for result
CPU = (float*)malloc(vector_size);// Allocate array on host for CPU_matrix_multiplication result
cudaMalloc((void**)&Left_Vector_d, vector_size); // Allocate array on device for LHS operand
cudaMalloc((void**)&Right_Vector_d, vector_size); // Allocate array on device for RHS operand but this is vector 1xN
cudaMalloc((void**)&Res_d, vector_size); // Allocate array on device for result
cudaMemcpy(Left_Vector_d, Left_Vector_h, vector_size, cudaMemcpyHostToDevice); // copy values to device
cudaMemcpy(Right_Vector_d, Right_Vector_h, vector_size, cudaMemcpyHostToDevice); // copy values to device
//Block dimension is directly from block_size
dim3 Block_dim(BLOCK_SIZE, BLOCK_SIZE);
//Grid dimension is found by dividing matrix dimension to block_size
dim3 Grid_dim(dim / BLOCK_SIZE, dim / BLOCK_SIZE);
//commented out the functions which helps to calculate time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//kernel call
multiply << < Grid_dim, Block_dim >> > (Left_Vector_d, Right_Vector_d, Res_d, dim);
//commented out the functions which helps to calculate time
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et;
cudaEventElapsedTime(&et, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Retrieve result from device and store it in host array
cudaMemcpy(Res_h, Res_d, vector_size, cudaMemcpyDeviceToHost);
clock_t begin = clock();
cpu_matrix_mult(Left_Vector_h, Right_Vector_h, CPU, dim); //matrix multiplication on cpu
clock_t end = clock();
double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC;
//commented out the functions which helps to calculate time
printf("GPU time= %f ms\n", et);
printf("CPU time= %lf ms\n", time_spent);
printf("Times Speed up= %f\n", time_spent / et);
//Prints the results
print_matrices(Res_h, "GPU_out", Left_matrix_x, Right_matrix_y, dim);
print_matrices(CPU, "CPU_out", Left_matrix_x, Right_matrix_y, dim);
bool equal = true;
for (int i = 0; i < Left_matrix_x && equal; i++) {
for (int j = 0; j < Right_matrix_y && equal; j++) {
if (fabsf(Res_h[i * dim + j] - CPU[i * dim + j]) > 0.001f)
{
equal = false;
printf("NOT EQUAL\n");
}
}
}
if (equal)
{
std::cout << "Results are equal!" << std::endl;
}
else
{
std::cout << "Results are NOT equal!" << std::endl;
}
// Cleanup
free(Left_Vector_h);
free(Right_Vector_h);
free(Res_h);
free(CPU);
cudaFree(Left_Vector_d);
cudaFree(Right_Vector_d);
cudaFree(Res_d);
}
|
c4595baafc323d7fc92391f0c0b98c31a1d1a958.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include <string>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include "basic_operations.h"
#include "matrix_operations.h"
#include "defs01.h"
#include "tests01.h"
int main() {
tester_01();
return 0;
}
|
c4595baafc323d7fc92391f0c0b98c31a1d1a958.cu
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include <string>
#include <cublas_v2.h>
#include <curand.h>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include "basic_operations.h"
#include "matrix_operations.h"
#include "defs01.h"
#include "tests01.h"
int main() {
tester_01();
return 0;
}
|
b2fabd9258c58903b7a7c308bc952e156038609c.hip
|
// !!! This is a file automatically generated by hipify!!!
#define USE_DOUBLE 0
#if USE_DOUBLE > 0
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#define FLOAT_TYPE double
#define FLOAT_TYPE4 double4
#define MAX_FLOAT_TYPE 1.7976931348623158e+308
#define MIN_FLOAT_TYPE -1.7976931348623158e+308
#else
#define FLOAT_TYPE float
#define FLOAT_TYPE4 float4
#define FLOAT_TYPE8 float8
#define MAX_FLOAT_TYPE 3.402823466e+38f
#define MIN_FLOAT_TYPE -3.402823466e+38f
#endif
#define VECSIZE 4
#define VECSIZE_8 8
#define MAX_KNN 30
// System includes
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <time.h>
// CUDA runtime
#include <hip/hip_runtime.h>
//Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <device_launch_parameters.h>
typedef struct Convex_Node {
bool isLeaf;
int node_index;
int parent_index;
int leaf_index; //the leaf index of this node in all leaf nodes
int left_node;
int right_node;
} CONVEX_TREE;
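//A flattened binary (convex) tree node: left_node/right_node hold the children's indexes in the
//same node array, and leaf_index is a leaf node's position among all leaf nodes, which is how the
//leaf-ordered arrays below are indexed.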
/*----------public parameters used in __global__ functions-------------------*/
int *d_candidate_query_points_indexes = NULL;
FLOAT_TYPE *d_candidate_query_points_set=NULL;
int *d_candidate_query_points_appr_leaf_node_indexes = NULL;
FLOAT_TYPE *d_all_sorted_data_set=NULL;
int *d_sorted_data_set_indexes = NULL;
CONVEX_TREE *d_tree_struct = NULL;
FLOAT_TYPE *d_all_leaf_nodes_ALPHA_set=NULL;
FLOAT_TYPE *d_all_leaf_nodes_BETA_set=NULL;
int *d_all_constrains_num_of_each_leaf_nodes=NULL;
int *d_all_leaf_nodes_offsets_in_all_ALPHA=NULL;
int *d_all_leaf_nodes_ancestor_nodes_ids=NULL;
int *d_leaf_nodes_start_pos_in_sorted_data_set=NULL;
int *d_pts_num_in_sorted_leaf_nodes=NULL;
FLOAT_TYPE *d_dist_k_mins_global_tmp=NULL;
int *d_idx_k_mins_global_tmp=NULL;
long *d_dist_computation_times_arr=NULL;
long *d_quadprog_times_arr=NULL;
long *d_dist_computation_times_in_quadprog=NULL;
FLOAT_TYPE *d_nodes_centers=NULL;
/*----------public parameters used in __global__ functions-------------------*/
//free memory malloced in CUDA
void free_cuda_mem(){
if (d_candidate_query_points_indexes != NULL){
hipFree(d_candidate_query_points_indexes);
d_candidate_query_points_indexes=NULL;
}
if (d_candidate_query_points_appr_leaf_node_indexes != NULL){
hipFree(d_candidate_query_points_appr_leaf_node_indexes);
d_candidate_query_points_appr_leaf_node_indexes=NULL;
}
if (d_all_sorted_data_set != NULL){
hipFree(d_all_sorted_data_set);
d_all_sorted_data_set=NULL;
}
if (d_sorted_data_set_indexes != NULL){
hipFree(d_sorted_data_set_indexes);
d_sorted_data_set_indexes = NULL;
}
if (d_tree_struct != NULL){
hipFree(d_tree_struct);
d_tree_struct = NULL;
}
if (d_all_leaf_nodes_ALPHA_set != NULL){
hipFree(d_all_leaf_nodes_ALPHA_set);
d_all_leaf_nodes_ALPHA_set = NULL;
}
if (d_all_leaf_nodes_BETA_set != NULL){
hipFree(d_all_leaf_nodes_BETA_set);
d_all_leaf_nodes_BETA_set = NULL;
}
if (d_all_constrains_num_of_each_leaf_nodes != NULL){
hipFree(d_all_constrains_num_of_each_leaf_nodes);
d_all_constrains_num_of_each_leaf_nodes = NULL;
}
if (d_all_leaf_nodes_offsets_in_all_ALPHA != NULL){
hipFree(d_all_leaf_nodes_offsets_in_all_ALPHA);
d_all_leaf_nodes_offsets_in_all_ALPHA = NULL;
}
if (d_all_leaf_nodes_ancestor_nodes_ids != NULL){
hipFree(d_all_leaf_nodes_ancestor_nodes_ids);
d_all_leaf_nodes_ancestor_nodes_ids = NULL;
}
if (d_leaf_nodes_start_pos_in_sorted_data_set != NULL){
hipFree(d_leaf_nodes_start_pos_in_sorted_data_set);
d_leaf_nodes_start_pos_in_sorted_data_set = NULL;
}
if (d_pts_num_in_sorted_leaf_nodes != NULL){
hipFree(d_pts_num_in_sorted_leaf_nodes);
d_pts_num_in_sorted_leaf_nodes = NULL;
}
if (d_dist_k_mins_global_tmp != NULL){
hipFree(d_dist_k_mins_global_tmp);
d_dist_k_mins_global_tmp = NULL;
}
if (d_idx_k_mins_global_tmp != NULL){
hipFree(d_idx_k_mins_global_tmp);
d_idx_k_mins_global_tmp = NULL;
}
if (d_dist_computation_times_arr != NULL){
hipFree(d_dist_computation_times_arr);
d_dist_computation_times_arr = NULL;
}
if (d_quadprog_times_arr != NULL){
hipFree(d_quadprog_times_arr);
d_quadprog_times_arr = NULL;
}
if (d_dist_computation_times_in_quadprog != NULL){
hipFree(d_dist_computation_times_in_quadprog);
d_dist_computation_times_in_quadprog = NULL;
}
if (d_nodes_centers != NULL){
hipFree(d_nodes_centers);
d_nodes_centers = NULL;
}
}
//the inner product of q and p
__device__ FLOAT_TYPE scalar_product_cuda(FLOAT_TYPE* p, FLOAT_TYPE* q, int DIM){
//DIM will be written in "make_kernel_from_file"
FLOAT_TYPE result = 0;
for (int i = 0; i<DIM; i++){
result += p[i] * q[i];
}
return result;
}
__device__ FLOAT_TYPE float_dist_squre_cuda(FLOAT_TYPE *p, FLOAT_TYPE *q, int DIM){
FLOAT_TYPE dist_tmp=0, tmp=0;
for (int j = 0; j<DIM; j++){
tmp = (q[j] - p[j]);
dist_tmp += tmp*tmp;
}
return dist_tmp;
}
__device__ FLOAT_TYPE Compute_distance(FLOAT_TYPE *p1, FLOAT_TYPE *p2, int DIM){
FLOAT_TYPE sum = 0;
FLOAT_TYPE *end = p1 + DIM;
for (; p1 != end; p1++, p2++){
FLOAT_TYPE d1 = *p1 - *p2;
d1 *= d1;
sum = sum + d1;
}
return sqrt(sum);
}
//return an approximate min dist from q to this convex node
//d_min is still an approximate distance; it can be improved or optimized.
//idea: if a point q is outside of this node, then the max distance from
// q to the violated constraints (hyperplanes) is an approximate
// distance, because for every such hyperplane h we have
// d_min >= dist(q,h);
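//illustrative example: if alpha'*q + beta = -3 for some constraint of the node, then q violates that
//halfspace and every point of the node is at least distance 3 from q (this reads as a distance only
//when alpha is unit-norm, which the code implicitly assumes), so d_min >= 3.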
__device__ FLOAT_TYPE approximate_min_dist_by_hyper_plane_cuda( FLOAT_TYPE* query_point,
FLOAT_TYPE* ALPHA,
FLOAT_TYPE* BETA,
int ALPPHA_size,
int DIM){
FLOAT_TYPE result = 0;
FLOAT_TYPE tmp_val = 0;
for (int i = 0; i<ALPPHA_size; i++)
{
//DIM will be written in "make_kernel_from_file"
FLOAT_TYPE* alpha = ALPHA + i*DIM;
FLOAT_TYPE beta = BETA[i];
tmp_val = scalar_product_cuda(alpha, query_point,DIM);
// if there exist an alpha and beta such that alpha[i]' * point + beta[i] < 0,
// the point is not in the node
if (tmp_val<0){
if (result < -tmp_val){
result = -tmp_val;
}
}
}
return result;
}
//return true if the d_min from q to this node is larger than dist_compare.
//@param dist_compute_times_in_appr_quadprog: count the dist computation times here
__device__ bool is_appr_min_dist_from_q_larger_by_hyper_plane_cuda( FLOAT_TYPE *query_point,
FLOAT_TYPE *ALPHA,
FLOAT_TYPE *BETA,
int ALPPHA_size,
FLOAT_TYPE dist_compare,
long *dist_compute_times_in_appr_quadprog,
FLOAT_TYPE *query_point_scalar_product_from_all_nodes,
int *cur_ancestor_nodes_ids,
int DIM
)
{
bool result = false;
int tmp_times = 0;
int cur_ancestor_node_id = 0;
for (int i = 0; i<ALPPHA_size; i++){
FLOAT_TYPE tmp_dist = BETA[i];
//---ORIGINAL SCALAR PRODUCT, MANY DUPLICATION, but, in low dim it is faster.
for (int j = 0; j<DIM; j++){
tmp_dist += ALPHA[i*DIM + j] * query_point[j];
}
tmp_times++;
if (tmp_dist<0){
if (dist_compare <= (tmp_dist*tmp_dist)){
//if there exists one such hyper plane then return.
result = true;
break;
}
}
}
*dist_compute_times_in_appr_quadprog += tmp_times;
return result;
}
//brute force computing and update dist_k_mins_private_tmp and idx_k_mins_global_tmp
//pts_num: the number of points in all_sorted_data_set.
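//The K current best squared distances are kept sorted in ascending order: a closer candidate
//overwrites the last (largest) entry and is then bubbled left by adjacent swaps.
//Example: with K_NN = 3 and current list {1.0, 4.0, 9.0}, a new squared distance 2.0
//replaces 9.0 and swaps once, giving {1.0, 2.0, 4.0}; the index array is swapped in step.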
__device__ void do_brute_force_and_update_private_cuda(
FLOAT_TYPE *cur_query_point,
int cur_query_point_index,
int pts_num,
int cur_leaf_node_start_pos,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
FLOAT_TYPE *dist_k_mins_private_tmp,
int *idx_k_mins_private_tmp,
int K_NN,
int DIM)
{
FLOAT_TYPE dist_squre_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
for (int i = 0; i<pts_num; i++){
dist_squre_tmp = float_dist_squre_cuda(all_sorted_data_set + (cur_leaf_node_start_pos + i)*DIM, cur_query_point,DIM);
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_private_tmp[K_NN - 1];
if (cur_k_min_dist_square> dist_squre_tmp){
//printf("update dist_k_mins_private_tmp...\n");
//printf("cur_k_min_dist_square=%f, dist_squre_tmp=%f \n",cur_k_min_dist_square,dist_squre_tmp );
int j = K_NN - 1;
dist_k_mins_private_tmp[j] = dist_squre_tmp;
int pts_idx = sorted_data_set_indexes[cur_leaf_node_start_pos + i];
idx_k_mins_private_tmp[j] = pts_idx;
for (; j>0; j--){
if (dist_k_mins_private_tmp[j - 1] > dist_k_mins_private_tmp[j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_private_tmp[j - 1];
dist_k_mins_private_tmp[j - 1] = dist_k_mins_private_tmp[j];
dist_k_mins_private_tmp[j] = tmp;
//swap indices
tmp_idx = idx_k_mins_private_tmp[j - 1];
idx_k_mins_private_tmp[j - 1] = idx_k_mins_private_tmp[j];
idx_k_mins_private_tmp[j] = tmp_idx;
}
else break;
}
}
}
}
//brute force computing and update dist_k_mins_private_tmp and idx_k_mins_global_tmp
//pts_num: the number of points in all_sorted_data_set.
__device__ void new_do_brute_force_and_update_private_cuda(
FLOAT_TYPE *cur_query_point,
int cur_query_point_index,
int pts_num,
int cur_leaf_node_start_pos,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
FLOAT_TYPE *dist_k_mins_private_tmp,
int *idx_k_mins_private_tmp,
int *remain_index,
int K_NN,
int DIM)
{
FLOAT_TYPE dist_squre_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
for (int i = 0; i<pts_num; i++){
dist_squre_tmp = float_dist_squre_cuda(all_sorted_data_set + (cur_leaf_node_start_pos + i)*DIM, cur_query_point, DIM);
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_private_tmp[K_NN - 1];
if (cur_k_min_dist_square> dist_squre_tmp){
//printf("update dist_k_mins_private_tmp...\n");
//printf("cur_k_min_dist_square=%f, dist_squre_tmp=%f \n",cur_k_min_dist_square,dist_squre_tmp );
int j = K_NN - 1;
dist_k_mins_private_tmp[j] = dist_squre_tmp;
int pts_idx = sorted_data_set_indexes[cur_leaf_node_start_pos + i];
idx_k_mins_private_tmp[j] = remain_index[pts_idx];
for (; j>0; j--){
if (dist_k_mins_private_tmp[j - 1] > dist_k_mins_private_tmp[j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_private_tmp[j - 1];
dist_k_mins_private_tmp[j - 1] = dist_k_mins_private_tmp[j];
dist_k_mins_private_tmp[j] = tmp;
//swap indices
tmp_idx = idx_k_mins_private_tmp[j - 1];
idx_k_mins_private_tmp[j - 1] = idx_k_mins_private_tmp[j];
idx_k_mins_private_tmp[j] = tmp_idx;
}
else break;
}
}
}
}
/*
0 candidate_query_points_num : the number of current candidate query points; when the full query set
is too large, a subset of the query points can be submitted to this kernel.
1 candidate_query_points_indexes : the indexes of current query points in all query points set
2 candidate_query_points_set : the current query points data set
3 candidate_query_points_appr_leaf_node_indexes : the approximate leaf node for candidate query points
4 all_sorted_data_set : all sorted data
5 sorted_data_set_indexes : all points indexes in sorted data set
6 tree_struct : the tree structure of the whole tree. It is not used now.
7 all_leaf_nodes_ALPHA_set : ALPHA set of all leaf nodes
8 leaf_nodes_BETA_set : BETA set of all leaf nodes
9 all_constrains_num_of_each_leaf_nodes : all_constrains_num_of_each_leaf_nodes[i]=j means the i^th leaf node has j constraints, i.e. j alphas and betas
10 all_leaf_nodes_offsets_in_all_ALPHA : the offset of each leaf node in ALPHA
11 leaf_node_num : the number of leaf nodes
12 all_leaf_nodes_ancestor_nodes_ids : the ancestor nodes ids of each leaf nodes
13 leaf_nodes_start_pos_in_sorted_data_set : the start position of each sorted leaf node in the sorted data set
14 pts_num_in_sorted_leaf_nodes : the number of points saved in each sorted leaf node
15 dist_k_mins_global_tmp : the K min-distance of all query points, the length of dist_mins_global_tmp is K* query_points_size
16 idx_k_mins_global_tmp : the indexes of the K nearest neighbors, the length of dist_mins_global_tmp is K* query_points_size
17 K_NN : the value of K
18 dist_computation_times_arr : dist_computation_times_arr[i] saves the total number of distance computations for the i^th point
19 quadprog_times_arr : quadprog_times_arr[i] saves the number of approximate quadprog calls for the i^th point
20 dist_computation_times_in_quadprog: dist_computation_times_in_quadprog[i] saves the total distance computation times
in quadprog of the i^th point.
*/
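//Per-thread strategy: each thread owns one query point, first brute-forces the query's approximate
//leaf node, then visits every other leaf and skips it whenever the hyperplane bound proves the leaf
//cannot contain anything closer than the current K-th best distance.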
__global__ void do_finding_KNN_by_leaf_order_cuda(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int *candidate_query_points_appr_leaf_node_indexes,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM,
int loop_id)
{
//---global thread id
//int tid = blockIdx.x;
//int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.y + threadIdx.x;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = x + y * blockDim.x * gridDim.x;
//printf(" [loopid=%d, tid_ori=%d], ", loop_id,tid);
//tid +=loop_id* blocks_per_time;
if (tid >= candidate_query_points_num){
return;
}
//---count the distance computation times in approximate quadprog.
long cur_dist_compute_times_in_appr_quadprog = 0;
int cur_query_points_appr_leaf_node_indexes = candidate_query_points_appr_leaf_node_indexes[tid];
int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[cur_query_points_appr_leaf_node_indexes];
/*---------------------------------------------------------------------------------------------------------------
//---query_points_nodes_alpha_scalar_product is not used in is_appr_min_dist_from_q_larger_by_hyper_plane now.
//---because visiting query_points_nodes_alpha_scalar_product randomly seems slow.
//---private scalar product between current query point and all ALPHAs, which are all initialized to 0.
//---each node has an alpha constraint, as well as the constraints of its ancestor nodes.
//---'ALL_NODES_NUM' will be written before kernel is created.
----------------------------------------------------------------------------------------------------------------*/
/*-----------------Copy global data as local data: visiting global data is relatively slow on the device-----------------------------*/
int quadprog_times_private = 0;
for (int j = 0; j < K_NN; j++){
dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = MAX_FLOAT_TYPE;
idx_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = -1;
}
//---here is tid instead of cur_query_point_index, tid is the offset of current query point in candidate_query_points_set
FLOAT_TYPE* cur_query_point = candidate_query_points_set + tid*DIM;
/*-----------------------------------------------------------------------------------------------------------------------------*/
long dist_computation_times_tmp = 0;
int pts_num = pts_num_in_sorted_leaf_nodes[cur_query_points_appr_leaf_node_indexes];
//---find approximate kNN in its approximate nodes.
do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
K_NN, DIM);
//---add distance computation times
//dist_computation_times_tmp += pts_num;
for (int i = 0; i < leaf_node_num; i++) {
if (i == cur_query_points_appr_leaf_node_indexes)
continue;
int alpha_offset = all_leaf_nodes_offsets_in_all_ALPHA[i];
int constrains_num = all_constrains_num_of_each_leaf_nodes[i];
//---get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN+K_NN - 1];
FLOAT_TYPE* cur_ALPHAT = all_leaf_nodes_ALPHA_set + alpha_offset*DIM;
FLOAT_TYPE* cur_BETA = all_leaf_nodes_BETA_set + alpha_offset;
//---the number of ancestor nodes is the same as the size of constraints
int* cur_ancestor_nodes_ids = all_leaf_nodes_ancestor_nodes_ids + alpha_offset;
//---check whether the current node is a candidate for current query point
if (!is_appr_min_dist_from_q_larger_by_hyper_plane_cuda(cur_query_point, cur_ALPHAT, cur_BETA,
constrains_num, cur_k_min_dist_square,
&cur_dist_compute_times_in_appr_quadprog,
NULL,
cur_ancestor_nodes_ids,
DIM
) )
{
//---do brute force distance computation here, and update dist_k_mins_global_tmp and idx_k_mins_global_tmp
//---get the number of points saved in current node
//---i is cur leaf node index, not leaf node ori_index
int pts_num = pts_num_in_sorted_leaf_nodes[i];
int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[i];
do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
K_NN, DIM);
}
}
}
/*
0 candidate_query_points_num : the number of current candidate query points; when the full query set
is too large, a subset of the query points can be submitted to this kernel.
1 candidate_query_points_indexes : the indexes of current query points in all query points set
2 candidate_query_points_set : the current query points data set
candidate_query_points_appr_leaf_node_indexes : the approximate leaf node for candidate query points
4 all_sorted_data_set : all sorted data
5 sorted_data_set_indexes : all points indexes in sorted data set
6 tree_struct : the tree structure of the whole tree. It is not used now.
7 all_leaf_nodes_ALPHA_set : ALPHA set of all leaf nodes
8 leaf_nodes_BETA_set : BETA set of all leaf nodes
9 all_constrains_num_of_each_leaf_nodes : all_constrains_num_of_each_leaf_nodes[i]=j means the i^th leaf node has j constraints, i.e. j alphas and betas
10 all_leaf_nodes_offsets_in_all_ALPHA : the offset of each leaf node in ALPHA
11 leaf_node_num : the number of leaf nodes
12 all_leaf_nodes_ancestor_nodes_ids : the ancestor nodes ids of each leaf nodes
13 leaf_nodes_start_pos_in_sorted_data_set : the start position of each sorted leaf node in the sorted data set
14 pts_num_in_sorted_leaf_nodes : the number of points saved in each sorted leaf node
15 dist_k_mins_global_tmp : the K min-distance of all query points, the length of dist_mins_global_tmp is K* query_points_size
16 idx_k_mins_global_tmp : the indexes of the K nearest neighbors, the length of dist_mins_global_tmp is K* query_points_size
17 K_NN : the value of K
18 dist_computation_times_arr : dist_computation_times_arr[i] saves the total number of distance computations for the i^th point
19 quadprog_times_arr : quadprog_times_arr[i] saves the number of approximate quadprog calls for the i^th point
20 dist_computation_times_in_quadprog: dist_computation_times_in_quadprog[i] saves the total distance computation times
in quadprog of the i^th point.
*/
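//This variant differs from do_finding_KNN_by_leaf_order_cuda in that it takes no approximate-leaf
//hint: the dist/idx arrays arrive pre-initialized from the host, every leaf is tested against the
//hyperplane bound, and the reported neighbor indexes are remapped through remain_index.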
__global__ void new_do_finding_KNN_by_leaf_order_cuda(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int *remain_index,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM,
int loop_id)
{
//---global thread id
//int tid = blockIdx.x;
//int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.y + threadIdx.x;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = x + y * blockDim.x * gridDim.x;
//printf(" [loopid=%d, tid_ori=%d], ", loop_id,tid);
//tid +=loop_id* blocks_per_time;
if (tid >= candidate_query_points_num){
return;
}
//---count the distance computation times in approximate quadprog.
long cur_dist_compute_times_in_appr_quadprog = 0;
//int cur_query_points_appr_leaf_node_indexes = candidate_query_points_appr_leaf_node_indexes[tid];
//int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[cur_query_points_appr_leaf_node_indexes];
/*---------------------------------------------------------------------------------------------------------------
//---query_points_nodes_alpha_scalar_product is not used in is_appr_min_dist_from_q_larger_by_hyper_plane now.
//---because visiting query_points_nodes_alpha_scalar_product randomly seems slow.
//---private scalar product between current query point and all ALPHAs, which are all initialized to 0.
//---each node has an alpha constraint, as well as the constraints of its ancestor nodes.
//---'ALL_NODES_NUM' will be written before kernel is created.
----------------------------------------------------------------------------------------------------------------*/
/*-----------------Copy global data as local data: visiting global data is relatively slow on the device-----------------------------*/
int quadprog_times_private = 0;
/*for (int j = 0; j < K_NN; j++){
dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = MAX_FLOAT_TYPE;
idx_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = -1;
}
*/
//---here is tid instead of cur_query_point_index, tid is the offset of current query point in candidate_query_points_set
FLOAT_TYPE* cur_query_point = candidate_query_points_set + tid*DIM;
/*-----------------------------------------------------------------------------------------------------------------------------*/
long dist_computation_times_tmp = 0;
//int pts_num = pts_num_in_sorted_leaf_nodes[cur_query_points_appr_leaf_node_indexes];
//---find approximate kNN in its approximate nodes.
/*do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
K_NN, DIM);
*/
//---add distance computation times
//dist_computation_times_tmp += pts_num;
for (int i = 0; i < leaf_node_num; i++) {
//if (i == cur_query_points_appr_leaf_node_indexes)
//continue;
int alpha_offset = all_leaf_nodes_offsets_in_all_ALPHA[i];
int constrains_num = all_constrains_num_of_each_leaf_nodes[i];
//---get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN+K_NN - 1];
FLOAT_TYPE* cur_ALPHAT = all_leaf_nodes_ALPHA_set + alpha_offset*DIM;
FLOAT_TYPE* cur_BETA = all_leaf_nodes_BETA_set + alpha_offset;
//---the number of ancestor nodes is the same as the size of constraints
int* cur_ancestor_nodes_ids = all_leaf_nodes_ancestor_nodes_ids + alpha_offset;
//---check whether the current node is a candidate for current query point
if (!is_appr_min_dist_from_q_larger_by_hyper_plane_cuda(cur_query_point, cur_ALPHAT, cur_BETA,
constrains_num, cur_k_min_dist_square,
&cur_dist_compute_times_in_appr_quadprog,
NULL,
cur_ancestor_nodes_ids,
DIM
) )
{
//---do brute force distance computation here, and update dist_k_mins_global_tmp and idx_k_mins_global_tmp
//---get the number of points saved in current node
//---i is cur leaf node index, not leaf node ori_index
int pts_num = pts_num_in_sorted_leaf_nodes[i];
int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[i];
new_do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
remain_index,K_NN, DIM);
}
}
}
__global__ void print_float_Data(FLOAT_TYPE *dist_k_mins_global_tmp, int* idx_k_mins_global_tmp, int loop_id){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int tid_ori=tid;
tid+= loop_id*1024;
int K_NN=30;
int cur_query_point_index=tid;
for (int j = 0; j<K_NN; j++){
printf(" [tid=%d, j=%d, dist =%f, idx=%d] ", tid, j, dist_k_mins_global_tmp[cur_query_point_index*K_NN + j], idx_k_mins_global_tmp[cur_query_point_index*K_NN + j]);
}
}
/*
//find kNN by brute force
0 data_set : the reference data set points
1 data_set_size : the number of points in data_set
2 query_points : all query points
3 query_points_size : the length of query_points
4 dist_k_mins_global_tmp : the K min-distance of all query points,
the length of dist_mins_global_tmp is K* query_points_size
5 idx_k_mins_global_tmp : the indexes of the K nearest neighbors,
the length of dist_mins_global_tmp is K* query_points_size
6 K_NN : the value of K
*/
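//Each thread serves one query point and maintains its K best squared distances directly in global
//memory (kept sorted ascending, same insertion scheme as do_brute_force_and_update_private_cuda).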
/*
__global__ void do_brute_force_KNN_cuda(
FLOAT_TYPE *data_set,int data_set_size,
FLOAT_TYPE *query_set,int query_set_size,
FLOAT_TYPE *KNN_index_with_dist,
int K,int DIM)
{
//global thread id
int tid = blockDim.x *blockIdx.x + threadIdx.x;
if (tid > query_set_size){
return;
}
unsigned int current_query_point_index = tid;
FLOAT_TYPE *temp = new FLOAT_TYPE[2 * K];
FLOAT_TYPE *p1, *p2;
int tmp;
FLOAT_TYPE d, max_dist, max_idx;
p1 = query_set + current_query_point_index*DIM;
for (int i = 0; i < data_set_size; i++){
p2 = data_set + i*DIM;
d = Compute_distance(p1, p2, DIM);
if (i < K){
temp[i * 2] = i;
temp[i * 2 + 1] = d;
}
else{
tmp = 0;
max_idx = temp[0];
max_dist = temp[1];
for (int j = 1; j < K; j++){
if (temp[2 * j + 1] > max_dist){
tmp = j;
max_idx = temp[2 * j];
max_dist = temp[2 * j + 1];
}
}
if (d < max_dist){
temp[tmp * 2] = i;
temp[tmp * 2 + 1] = d;
}
}
}
memcpy(KNN_index_with_dist + current_query_point_index * 2 * K, temp, (2 * K)*sizeof(FLOAT_TYPE));
//hipMemcpy(KNN_index_with_dist + current_query_point_index * 2 * K, temp, (2 * K)*sizeof(FLOAT_TYPE), hipMemcpyDeviceToDevice);
}
*/
__global__ void do_brute_force_KNN_cuda(
FLOAT_TYPE *data_set,
int data_set_size,
FLOAT_TYPE *query_points,
int query_points_size,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
// global thread id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= query_points_size){
return;
}
unsigned int current_query_point_index = tid;
//---init the distance as MAX_FLOAT_TYPE
for (int i = 0; i<K_NN; i++){
dist_k_mins_global_tmp[current_query_point_index*K_NN + i] = MAX_FLOAT_TYPE;
}
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
//if (tid==checkid)
// printf("cur_k_min_dist_square =%f \n",cur_k_min_dist_square);
FLOAT_TYPE dist_square_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
//local copy
FLOAT_TYPE* cur_query_point_private = new FLOAT_TYPE[DIM];
for (int i = 0; i<DIM; i++){
cur_query_point_private[i] = query_points[current_query_point_index*DIM + i];
}
for (int i = 0; i<data_set_size; i++){
dist_square_tmp = 0;
cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
for (int j = 0; j<DIM; j++){
tmp = data_set[i*DIM + j] - cur_query_point_private[j];
dist_square_tmp += tmp*tmp;
//printf("tmp =%f, dist_square_tmp=%f\n",tmp,dist_square_tmp);
}
//printf("dist_square_tmp =%f, cur_k_min_dist_square=%f \n",dist_square_tmp, cur_k_min_dist_square);
if (cur_k_min_dist_square> dist_square_tmp){
//printf("update dist_k_mins_global_tmp...\n");
int j = K_NN - 1;
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = dist_square_tmp;
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = i;
for (; j>0; j--){
if (dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] > dist_k_mins_global_tmp[current_query_point_index*K_NN + j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = dist_k_mins_global_tmp[current_query_point_index*K_NN + j];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp;
//swap indices
tmp_idx = idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = idx_k_mins_global_tmp[current_query_point_index*K_NN + j];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp_idx;
}
else break;
}
}
}
//free the per-thread copy allocated with device-side new
delete[] cur_query_point_private;
}
/*
//find kNN by brute force for outliers
0 data_set : the reference data set points
1 data_set_size : the number of points in data_set
2 query_points : all query points
3 query_points_size : the length of query_points
4 dist_k_mins_global_tmp : the K min-distance of all query points,
the length of dist_mins_global_tmp is K* query_points_size
5 idx_k_mins_global_tmp : the indexes of the K nearest neighbors,
the length of dist_mins_global_tmp is K* query_points_size
6 K_NN : the value of K
*/
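//Same brute-force scheme as above, but dist_k_mins_global_tmp / idx_k_mins_global_tmp arrive
//pre-initialized from an earlier search, and recorded neighbor indexes are taken from data_indexes,
//so the results refer to positions in the original (full) data set.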
__global__ void do_brute_force_KNN_for_outliers_cuda(
FLOAT_TYPE *data_set,
int data_set_size,
int *data_indexes,
FLOAT_TYPE *query_points,
int query_points_size,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
// global thread id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int tid = threadIdx.x;
//int tid = 0;
//printf("tid =%d \n",tid);
if (tid >= query_points_size){
return;
}
//printf("tid=%d, data_set_size =%d,query_points_size=%d \n",tid,data_set_size,query_points_size);
unsigned int current_query_point_index = tid;
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
//if (tid==checkid)
// printf("cur_k_min_dist_square =%f \n",cur_k_min_dist_square);
FLOAT_TYPE dist_square_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
//local copy
FLOAT_TYPE* cur_query_point_private = new FLOAT_TYPE[DIM];
for (int i = 0; i<DIM; i++){
cur_query_point_private[i] = query_points[current_query_point_index*DIM + i];
}
for (int i = 0; i < data_set_size; i++){
dist_square_tmp = 0;
cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
for (int j = 0; j<DIM; j++){
tmp = data_set[i*DIM + j] - cur_query_point_private[j];
dist_square_tmp += tmp*tmp;
//printf("tmp =%f, dist_square_tmp=%f\n",tmp,dist_square_tmp);
}
//printf("dist_square_tmp =%f, cur_k_min_dist_square=%f \n",dist_square_tmp, cur_k_min_dist_square);
if (cur_k_min_dist_square> dist_square_tmp){
//printf("update dist_k_mins_global_tmp...\n");
int j = K_NN - 1;
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = dist_square_tmp;
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = data_indexes[i];
for (; j>0; j--){
if (dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] > dist_k_mins_global_tmp[current_query_point_index*K_NN + j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = dist_k_mins_global_tmp[current_query_point_index*K_NN + j];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp;
//swap indices
tmp_idx = idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = idx_k_mins_global_tmp[current_query_point_index*K_NN + j];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp_idx;
}
else break;
}
}
}
//free the per-thread copy allocated with device-side new
delete[] cur_query_point_private;
}
__device__ int get_min_k_index_cuda(FLOAT_TYPE *dist_k_mins_private_tmp, int K_NN){
FLOAT_TYPE tmp_max = dist_k_mins_private_tmp[0];
int result = 0;
for (int i = 1; i<K_NN; i++){
if (dist_k_mins_private_tmp[i]>tmp_max){
result = i;
tmp_max = dist_k_mins_private_tmp[i];
}
}
return result;
}
bool init_CUDA_device(){
int count;
hipGetDeviceCount(&count);
//printf("\nDevice number: %d\n", count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for (i = 0; i < count; i++) {
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
//printf("Device name :%s\n", prop.name);
//printf("Device major :%d\n", prop.major);
//printf("Device multiProcessorCount :%d\n", prop.multiProcessorCount);
//printf("Device maxThreadsPerBlock :%d\n", prop.maxThreadsPerBlock);
//printf("Device totalGlobalMem :%ld\n", prop.totalGlobalMem);
//printf("Device totalConstMem :%ld\n", prop.totalConstMem);
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
/**
* Host main routine
*/
extern "C" int call_cuda_kernel(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int *candidate_query_points_appr_leaf_node_indexes,
int sorted_data_len,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
int all_leaf_nodes_constraint_num,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM)
{
clock_t start, finish,start1, finish1;
float duration ;
bool cuda_init =init_CUDA_device();
if (cuda_init){
printf("\nsucceeded in initializing CUDA");
}
hipError_t err = hipSuccess;
//Launch the Vector Add CUDA Kernel
printf("\nCUDA Malloc Memory.....\n ");
start=clock();
size_t size = candidate_query_points_num * sizeof(int);
if (d_candidate_query_points_indexes==NULL){
err = hipMalloc((void **)&d_candidate_query_points_indexes, size);
err = hipMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, hipMemcpyHostToDevice);
}
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
if (d_candidate_query_points_set==NULL){
err = hipMalloc((void **)&d_candidate_query_points_set, size);
err = hipMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, hipMemcpyHostToDevice);
}
size = candidate_query_points_num * sizeof(int);
if (d_candidate_query_points_appr_leaf_node_indexes==NULL){
err = hipMalloc((void **)&d_candidate_query_points_appr_leaf_node_indexes, size);
err = hipMemcpy(d_candidate_query_points_appr_leaf_node_indexes, candidate_query_points_appr_leaf_node_indexes, size, hipMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(FLOAT_TYPE)*DIM;
if (d_all_sorted_data_set==NULL){
err = hipMalloc((void **)&d_all_sorted_data_set, size);
err = hipMemcpy(d_all_sorted_data_set, all_sorted_data_set, size, hipMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(int);
if (d_sorted_data_set_indexes==NULL){
err = hipMalloc((void **)&d_sorted_data_set_indexes, size);
err = hipMemcpy(d_sorted_data_set_indexes, sorted_data_set_indexes, size, hipMemcpyHostToDevice);
}
size=tree_nodes_num*sizeof(CONVEX_TREE);
if (d_tree_struct==NULL){
err = hipMalloc((void **)&d_tree_struct, size);
err = hipMemcpy(d_tree_struct, tree_struct, size, hipMemcpyHostToDevice);
}
size= all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE)*DIM;
if (d_all_leaf_nodes_ALPHA_set==NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_ALPHA_set, size);
err = hipMemcpy(d_all_leaf_nodes_ALPHA_set, all_leaf_nodes_ALPHA_set, size, hipMemcpyHostToDevice);
}
size= all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE);
if (d_all_leaf_nodes_BETA_set==NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_BETA_set, size);
err = hipMemcpy(d_all_leaf_nodes_BETA_set, all_leaf_nodes_BETA_set, size, hipMemcpyHostToDevice);
}
size= leaf_node_num*sizeof(int);
if (d_all_constrains_num_of_each_leaf_nodes==NULL){
err = hipMalloc((void **)&d_all_constrains_num_of_each_leaf_nodes, size);
err = hipMemcpy(d_all_constrains_num_of_each_leaf_nodes, all_constrains_num_of_each_leaf_nodes, size, hipMemcpyHostToDevice);
}
if (d_all_leaf_nodes_offsets_in_all_ALPHA==NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_offsets_in_all_ALPHA, size);
err = hipMemcpy(d_all_leaf_nodes_offsets_in_all_ALPHA, all_leaf_nodes_offsets_in_all_ALPHA, size, hipMemcpyHostToDevice);
}
if (d_all_leaf_nodes_ancestor_nodes_ids==NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_ancestor_nodes_ids, size);
err = hipMemcpy(d_all_leaf_nodes_ancestor_nodes_ids, all_leaf_nodes_ancestor_nodes_ids, size, hipMemcpyHostToDevice);
}
if (d_leaf_nodes_start_pos_in_sorted_data_set==NULL){
err = hipMalloc((void **)&d_leaf_nodes_start_pos_in_sorted_data_set, size);
err = hipMemcpy(d_leaf_nodes_start_pos_in_sorted_data_set, leaf_nodes_start_pos_in_sorted_data_set, size, hipMemcpyHostToDevice);
}
if (d_pts_num_in_sorted_leaf_nodes==NULL){
err = hipMalloc((void **)&d_pts_num_in_sorted_leaf_nodes, size);
err = hipMemcpy(d_pts_num_in_sorted_leaf_nodes, pts_num_in_sorted_leaf_nodes, size, hipMemcpyHostToDevice);
}
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
if (d_dist_k_mins_global_tmp==NULL){
err = hipMalloc((void **)&d_dist_k_mins_global_tmp, size);
}
size= candidate_query_points_num* sizeof(int)* K_NN;
if (d_idx_k_mins_global_tmp==NULL){
err = hipMalloc((void **)&d_idx_k_mins_global_tmp, size);
}
size= candidate_query_points_num*sizeof(long);
err = hipMalloc((void **)&d_dist_computation_times_arr, size);
err = hipMalloc((void **)&d_quadprog_times_arr, size);
err = hipMalloc((void **)&d_dist_computation_times_in_quadprog, size);
int task_per_num=100000/256;
finish=clock();
duration = (float)(finish-start)/ CLOCKS_PER_SEC;
//printf( "\n CUDA Malloc Memory Time %f\n", duration);
//printf ("\n calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num,1), threads(1024,1);
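//One thread per candidate query point: candidate_query_points_num/1024 + 1 blocks of 1024 threads;
//surplus threads return immediately via the tid >= candidate_query_points_num check in the kernel.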
//int blocks_per_time=6*16*128*8;
start1 = clock();
hipLaunchKernelGGL(( do_finding_KNN_by_leaf_order_cuda) , dim3(blocks), dim3(threads), 0, 0,
candidate_query_points_num,
d_candidate_query_points_indexes,
d_candidate_query_points_set,
d_candidate_query_points_appr_leaf_node_indexes,
d_all_sorted_data_set,
d_sorted_data_set_indexes,
d_tree_struct,
d_all_leaf_nodes_ALPHA_set,
d_all_leaf_nodes_BETA_set,
d_all_constrains_num_of_each_leaf_nodes,
d_all_leaf_nodes_offsets_in_all_ALPHA,
leaf_node_num,
d_all_leaf_nodes_ancestor_nodes_ids,
d_leaf_nodes_start_pos_in_sorted_data_set,
d_pts_num_in_sorted_leaf_nodes,
d_dist_k_mins_global_tmp,
d_idx_k_mins_global_tmp,
K_NN,
d_dist_computation_times_arr,
d_quadprog_times_arr,
d_dist_computation_times_in_quadprog,
NODES_NUM,
DIM,
1);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch do_finding_KNN_by_leaf_order_cuda kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (float)(finish1-start1)/ CLOCKS_PER_SEC;
//printf( "\n performing do_finding_KNN_by_leaf_order_cuda time %f milsec %f s\n",(float)(finish1-start1), duration);
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
err = hipMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
size= candidate_query_points_num* sizeof(int)* K_NN;
err = hipMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy kNN results back from the device (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
free_cuda_mem();
return 0;
}
/**
* Host main routine
*/
extern "C" int new_call_cuda_kernel(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int sorted_data_len,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
int all_leaf_nodes_constraint_num,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int *remain_index,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM)
{
clock_t start, finish, start1, finish1;
float duration;
bool cuda_init = init_CUDA_device();
if (cuda_init){
printf("\nsucceeded in initializing CUDA");
}
hipError_t err = hipSuccess;
//Launch the Vector Add CUDA Kernel
printf("\nCUDA Malloc Memory.....\n ");
start = clock();
size_t size = candidate_query_points_num * sizeof(int);
if (d_candidate_query_points_indexes == NULL){
err = hipMalloc((void **)&d_candidate_query_points_indexes, size);
err = hipMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, hipMemcpyHostToDevice);
}
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
if (d_candidate_query_points_set == NULL){
err = hipMalloc((void **)&d_candidate_query_points_set, size);
err = hipMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, hipMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(FLOAT_TYPE)*DIM;
if (d_all_sorted_data_set == NULL){
err = hipMalloc((void **)&d_all_sorted_data_set, size);
err = hipMemcpy(d_all_sorted_data_set, all_sorted_data_set, size, hipMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(int);
if (d_sorted_data_set_indexes == NULL){
err = hipMalloc((void **)&d_sorted_data_set_indexes, size);
err = hipMemcpy(d_sorted_data_set_indexes, sorted_data_set_indexes, size, hipMemcpyHostToDevice);
}
size = tree_nodes_num*sizeof(CONVEX_TREE);
if (d_tree_struct == NULL){
err = hipMalloc((void **)&d_tree_struct, size);
err = hipMemcpy(d_tree_struct, tree_struct, size, hipMemcpyHostToDevice);
}
size = all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE)*DIM;
if (d_all_leaf_nodes_ALPHA_set == NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_ALPHA_set, size);
err = hipMemcpy(d_all_leaf_nodes_ALPHA_set, all_leaf_nodes_ALPHA_set, size, hipMemcpyHostToDevice);
}
size = all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE);
if (d_all_leaf_nodes_BETA_set == NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_BETA_set, size);
err = hipMemcpy(d_all_leaf_nodes_BETA_set, all_leaf_nodes_BETA_set, size, hipMemcpyHostToDevice);
}
size = leaf_node_num*sizeof(int);
if (d_all_constrains_num_of_each_leaf_nodes == NULL){
err = hipMalloc((void **)&d_all_constrains_num_of_each_leaf_nodes, size);
err = hipMemcpy(d_all_constrains_num_of_each_leaf_nodes, all_constrains_num_of_each_leaf_nodes, size, hipMemcpyHostToDevice);
}
if (d_all_leaf_nodes_offsets_in_all_ALPHA == NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_offsets_in_all_ALPHA, size);
err = hipMemcpy(d_all_leaf_nodes_offsets_in_all_ALPHA, all_leaf_nodes_offsets_in_all_ALPHA, size, hipMemcpyHostToDevice);
}
if (d_all_leaf_nodes_ancestor_nodes_ids == NULL){
err = hipMalloc((void **)&d_all_leaf_nodes_ancestor_nodes_ids, size);
err = hipMemcpy(d_all_leaf_nodes_ancestor_nodes_ids, all_leaf_nodes_ancestor_nodes_ids, size, hipMemcpyHostToDevice);
}
if (d_leaf_nodes_start_pos_in_sorted_data_set == NULL){
err = hipMalloc((void **)&d_leaf_nodes_start_pos_in_sorted_data_set, size);
err = hipMemcpy(d_leaf_nodes_start_pos_in_sorted_data_set, leaf_nodes_start_pos_in_sorted_data_set, size, hipMemcpyHostToDevice);
}
if (d_pts_num_in_sorted_leaf_nodes == NULL){
err = hipMalloc((void **)&d_pts_num_in_sorted_leaf_nodes, size);
err = hipMemcpy(d_pts_num_in_sorted_leaf_nodes, pts_num_in_sorted_leaf_nodes, size, hipMemcpyHostToDevice);
}
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
if (d_dist_k_mins_global_tmp == NULL){
err = hipMalloc((void **)&d_dist_k_mins_global_tmp, size);
err = hipMemcpy(d_dist_k_mins_global_tmp, dist_k_mins_global_tmp, size, hipMemcpyHostToDevice);
}
size = candidate_query_points_num* sizeof(int)* K_NN;
if (d_idx_k_mins_global_tmp == NULL){
err = hipMalloc((void **)&d_idx_k_mins_global_tmp, size);
err = hipMemcpy(d_idx_k_mins_global_tmp, idx_k_mins_global_tmp, size, hipMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(int);
int* d_remain_index = NULL;
err = hipMalloc((void **)&d_remain_index, size);
err = hipMemcpy(d_remain_index, remain_index, size, hipMemcpyHostToDevice);
size = candidate_query_points_num*sizeof(long);
err = hipMalloc((void **)&d_dist_computation_times_arr, size);
err = hipMalloc((void **)&d_quadprog_times_arr, size);
err = hipMalloc((void **)&d_dist_computation_times_in_quadprog, size);
int task_per_num = 100000 / 256;
finish = clock();
duration = (float)(finish - start) / CLOCKS_PER_SEC;
//printf( "\n CUDA Malloc Memory Time %f\n", duration);
//printf ("\n calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num, 1), threads(1024, 1);
//int blocks_per_time = 6 * 16 * 128 * 8;
start1 = clock();
hipLaunchKernelGGL(( new_do_finding_KNN_by_leaf_order_cuda) , dim3(blocks), dim3(threads), 0, 0,
candidate_query_points_num,
d_candidate_query_points_indexes,
d_candidate_query_points_set,
d_all_sorted_data_set,
d_sorted_data_set_indexes,
d_tree_struct,
d_all_leaf_nodes_ALPHA_set,
d_all_leaf_nodes_BETA_set,
d_all_constrains_num_of_each_leaf_nodes,
d_all_leaf_nodes_offsets_in_all_ALPHA,
leaf_node_num,
d_all_leaf_nodes_ancestor_nodes_ids,
d_leaf_nodes_start_pos_in_sorted_data_set,
d_pts_num_in_sorted_leaf_nodes,
d_dist_k_mins_global_tmp,
d_idx_k_mins_global_tmp,
d_remain_index,
K_NN,
d_dist_computation_times_arr,
d_quadprog_times_arr,
d_dist_computation_times_in_quadprog,
NODES_NUM,
DIM,
1);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch new_do_finding_KNN_by_leaf_order_cuda kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (float)(finish1 - start1) / CLOCKS_PER_SEC;
//printf( "\n performing do_finding_KNN_by_leaf_order_cuda time %f milsec %f s\n",(float)(finish1-start1), duration);
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
err = hipMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
size = candidate_query_points_num* sizeof(int)* K_NN;
err = hipMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy kNN results back from the device (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
free_cuda_mem();
return 0;
}
__global__ void kernel_do_find_approximate_nodes(
int candidate_query_points_num,
FLOAT_TYPE *candidate_query_points_set,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *nodes_centers,
int *appr_leaf_node_indexes,
int DIM
){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= candidate_query_points_num) return;
FLOAT_TYPE *q = candidate_query_points_set + tid * DIM;
int cur_node_index=0;
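//Greedy descent from the root: at each internal node follow the child whose center is closer to q.
//The leaf reached is only an approximate nearest leaf; the kNN kernel uses it as the starting leaf
//and still checks the remaining leaves against the hyperplane bound.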
while (tree_struct[cur_node_index].isLeaf == false){
FLOAT_TYPE left_dist_squre = float_dist_squre_cuda(q, nodes_centers+tree_struct[cur_node_index].left_node*DIM,DIM);
FLOAT_TYPE right_dist_squre = float_dist_squre_cuda(q, nodes_centers+tree_struct[cur_node_index].right_node*DIM ,DIM);
//count the distance computation times.
if (left_dist_squre >= right_dist_squre){
cur_node_index = tree_struct[cur_node_index].right_node;
}
else{
cur_node_index = tree_struct[cur_node_index].left_node;
}
}
appr_leaf_node_indexes[tid] = tree_struct[cur_node_index].leaf_index;
}
extern "C" void call_do_find_approximate_nodes(
int candidate_query_points_num,
FLOAT_TYPE *candidate_query_points_set,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *nodes_centers,
int *candidate_query_points_appr_leaf_node_indexes,
int DIM
){
size_t size = candidate_query_points_num * sizeof(int);
hipError_t err = hipSuccess;
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
err = hipMalloc((void **)&d_candidate_query_points_set, size);
err = hipMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, hipMemcpyHostToDevice);
size = tree_nodes_num * sizeof(CONVEX_TREE);
err = hipMalloc((void **)&d_tree_struct, size);
err = hipMemcpy(d_tree_struct, tree_struct, size, hipMemcpyHostToDevice);
size = candidate_query_points_num * sizeof(int);
err = hipMalloc((void **)&d_candidate_query_points_appr_leaf_node_indexes, size);
size= tree_nodes_num*sizeof(FLOAT_TYPE)*DIM;
err = hipMalloc((void **)&d_nodes_centers, size);
err = hipMemcpy(d_nodes_centers, nodes_centers, size, hipMemcpyHostToDevice);
int block_num =candidate_query_points_num/1024 +1;
dim3 blocks(block_num,1), threads(1024,1);
hipLaunchKernelGGL(( kernel_do_find_approximate_nodes), dim3(blocks), dim3(threads), 0, 0, candidate_query_points_num,
d_candidate_query_points_set,
tree_nodes_num,
d_tree_struct,
d_nodes_centers,
d_candidate_query_points_appr_leaf_node_indexes,
DIM);
err = hipGetLastError();
if (err != hipSuccess){
printf("Failed to launch kernel_do_find_approximate_nodes (error code %s)!\n", hipGetErrorString(err));
}
size = candidate_query_points_num * sizeof(int);
err = hipMemcpy(candidate_query_points_appr_leaf_node_indexes, d_candidate_query_points_appr_leaf_node_indexes, size, hipMemcpyDeviceToHost);
err = hipGetLastError();
if (err != hipSuccess){
printf("Failed to copy approximate leaf node indexes from the device (error code %s)!\n", hipGetErrorString(err));
}
}
/**
* Host main routine
*/
/*
extern "C" int call_cuda_kernel_brute_force_and_update(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int data_set_size,
FLOAT_TYPE *data_set,
FLOAT_TYPE *KNN_index_with_dist,
int K_NN,
int DIM)
{
clock_t start, finish, start1, finish1;
bool cuda_init = init_CUDA_device();
if (cuda_init){
printf("succed for initializing CUDA\n");
}
hipError_t err = hipSuccess;
//Launch the Vector Add CUDA Kernel
printf("CUDA Malloc Memory.....\n");
start = clock();
size_t size = candidate_query_points_num * sizeof(int);
int *d_candidate_query_points_indexes = NULL;
err = hipMalloc((void **)&d_candidate_query_points_indexes, size);
err = hipMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, hipMemcpyHostToDevice);
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_candidate_query_points_set = NULL;
err = hipMalloc((void **)&d_candidate_query_points_set, size);
err = hipMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, hipMemcpyHostToDevice);
size = data_set_size * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_data_set = NULL;
err = hipMalloc((void **)&d_data_set, size);
err = hipMemcpy(d_data_set, data_set, size, hipMemcpyHostToDevice);
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN * 2;
FLOAT_TYPE *d_KNN_index_with_dist = NULL;
err = hipMalloc((void **)&d_KNN_index_with_dist, size);
finish = clock();
double duration = (double)(finish - start) / CLOCKS_PER_SEC;
printf("CUDA Malloc Memory Time %fs\n", duration);
printf("calling do_finding_KNN_by_leaf_order_cuda.....\n");
dim3 grids(2, 1), blocks(6, 1), threads(1024, 1);
start1 = clock();
do_brute_force_KNN_cuda << < blocks, threads >> >
(d_data_set, data_set_size, d_candidate_query_points_set, candidate_query_points_num,
d_KNN_index_with_dist, K_NN, DIM);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN * 2;
err = hipMemcpy(KNN_index_with_dist, d_KNN_index_with_dist, size, hipMemcpyDeviceToHost);
//size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
//err = hipMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
//size= candidate_query_points_num* sizeof(int)* K_NN;
//err = hipMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (double)(finish1 - start1) / CLOCKS_PER_SEC;
printf("performing do_finding_KNN_by_leaf_order_cuda time %fs\n", duration);
return 0;
}
*/
/**
* Host main routine
*/
extern "C" int call_cuda_kernel_brute_force(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int data_set_size,
FLOAT_TYPE *data_set,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
clock_t start, finish,start1, finish1;
bool cuda_init =init_CUDA_device();
if (cuda_init){
printf("succeeded in initializing CUDA\n");
}
hipError_t err = hipSuccess;
//Launch the Vector Add CUDA Kernel
printf("CUDA Malloc Memory.....\n");
start=clock();
size_t size = candidate_query_points_num * sizeof(int);
int *d_candidate_query_points_indexes = NULL;
err = hipMalloc((void **)&d_candidate_query_points_indexes, size);
err = hipMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, hipMemcpyHostToDevice);
for (int i = 0; i < 100 && i < candidate_query_points_num; i++){
for (int j = 0; j < DIM; j++){
printf("%.1f ", candidate_query_points_set[i*DIM + j]);
}
printf("\n");
}
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_candidate_query_points_set=NULL;
err = hipMalloc((void **)&d_candidate_query_points_set, size);
err = hipMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, hipMemcpyHostToDevice);
size = data_set_size * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_data_set=NULL;
err = hipMalloc((void **)&d_data_set, size);
err = hipMemcpy(d_data_set, data_set, size, hipMemcpyHostToDevice);
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
FLOAT_TYPE *d_dist_k_mins_global_dist = NULL;
err = hipMalloc((void **)&d_dist_k_mins_global_dist, size);
size= candidate_query_points_num* sizeof(int)* K_NN;
int *d_idx_k_mins_global_tmp=NULL;
err = hipMalloc((void **)&d_idx_k_mins_global_tmp, size);
finish=clock();
double duration = (double)(finish-start)/ CLOCKS_PER_SEC;
printf( "CUDA Malloc Memory Time %fs\n", duration);
printf ("calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num, 1), threads(1024, 1);
start1=clock();
hipLaunchKernelGGL(( do_brute_force_KNN_cuda), dim3(blocks), dim3(threads), 0, 0,
d_data_set, data_set_size, d_candidate_query_points_set, candidate_query_points_num,
d_dist_k_mins_global_dist, d_idx_k_mins_global_tmp, K_NN, DIM);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
printf("ending......\n");
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
    //copy back from the buffer the kernel actually wrote (d_dist_k_mins_global_dist)
    err = hipMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_dist, size, hipMemcpyDeviceToHost);
size= candidate_query_points_num* sizeof(int)* K_NN;
err = hipMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1=clock();
duration = (double)(finish1-start1)/ CLOCKS_PER_SEC;
printf( "performing do_finding_KNN_by_leaf_order_cuda time %fs\n", duration);
    //release the device buffers owned by this call
    hipFree(d_candidate_query_points_indexes);
    hipFree(d_candidate_query_points_set);
    hipFree(d_data_set);
    hipFree(d_dist_k_mins_global_dist);
    hipFree(d_idx_k_mins_global_tmp);
    return 0;
}
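/*
  Usage sketch for the entry point above (hypothetical caller; only the buffer-size contract
  visible in the code is assumed): the caller owns all host buffers, and on return the j-th
  nearest neighbour of query i is idx[i*K_NN + j] with squared distance dist[i*K_NN + j],
  sorted by increasing distance.
*/
static void example_call_brute_force_knn(FLOAT_TYPE *queries, int query_num,
                                         FLOAT_TYPE *data, int data_num,
                                         int K_NN, int DIM)
{
    int *query_idx = new int[query_num];
    for (int i = 0; i < query_num; i++) query_idx[i] = i;            //query ids 0..query_num-1
    FLOAT_TYPE *dist = new FLOAT_TYPE[(size_t)query_num * K_NN];     //squared distances, one row per query
    int *idx = new int[(size_t)query_num * K_NN];                    //matching data-set indexes
    call_cuda_kernel_brute_force(query_num, query_idx, queries,
                                 data_num, data, dist, idx, K_NN, DIM);
    delete[] query_idx;
    delete[] dist;
    delete[] idx;
}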
/**
* Host main routine
*/
extern "C" int call_cuda_kernel_brute_force_for_outliers(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int data_set_size,
FLOAT_TYPE *data_set,
int *data_indexes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
clock_t start, finish, start1, finish1;
bool cuda_init = init_CUDA_device();
/*
if (cuda_init){
printf("succed for initializing CUDA\n");
}
*/
hipError_t err = hipSuccess;
//Launch the Vector Add CUDA Kernel
//printf("CUDA Malloc Memory.....\n");
start = clock();
size_t size = candidate_query_points_num * sizeof(int);
int *d_candidate_query_points_indexes = NULL;
err = hipMalloc((void **)&d_candidate_query_points_indexes, size);
err = hipMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, hipMemcpyHostToDevice);
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_candidate_query_points_set = NULL;
err = hipMalloc((void **)&d_candidate_query_points_set, size);
err = hipMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, hipMemcpyHostToDevice);
size = data_set_size * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_data_set = NULL;
err = hipMalloc((void **)&d_data_set, size);
err = hipMemcpy(d_data_set, data_set, size, hipMemcpyHostToDevice);
size = data_set_size * sizeof(int);
int *d_data_indexes = NULL;
err = hipMalloc((void **)&d_data_indexes, size);
err = hipMemcpy(d_data_indexes, data_indexes, size, hipMemcpyHostToDevice);
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
FLOAT_TYPE *d_dist_k_mins_global_dist = NULL;
err = hipMalloc((void **)&d_dist_k_mins_global_dist, size);
err = hipMemcpy(d_dist_k_mins_global_dist, dist_k_mins_global_tmp, size, hipMemcpyHostToDevice);
size = candidate_query_points_num* sizeof(int)* K_NN;
int *d_idx_k_mins_global_tmp = NULL;
err = hipMalloc((void **)&d_idx_k_mins_global_tmp, size);
err = hipMemcpy(d_idx_k_mins_global_tmp, idx_k_mins_global_tmp, size, hipMemcpyHostToDevice);
finish = clock();
double duration = (double)(finish - start) / CLOCKS_PER_SEC;
//printf("CUDA Malloc Memory Time %fs\n", duration);
//printf("calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num, 1), threads(1024, 1);
start1 = clock();
do_brute_force_KNN_for_outliers_cuda << < blocks, threads >> >
(d_data_set, data_set_size, d_data_indexes, d_candidate_query_points_set, candidate_query_points_num,
d_dist_k_mins_global_dist, d_idx_k_mins_global_tmp, K_NN, DIM);
err = hipGetLastError();
if (err != hipSuccess)
{
//fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
    //copy back from the buffer the kernel actually wrote (d_dist_k_mins_global_dist)
    err = hipMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_dist, size, hipMemcpyDeviceToHost);
size = candidate_query_points_num* sizeof(int)* K_NN;
err = hipMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, hipMemcpyDeviceToHost);
err = hipGetLastError();
if (err != hipSuccess)
{
//fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (double)(finish1 - start1) / CLOCKS_PER_SEC;
//printf("performing do_finding_KNN_by_leaf_order_cuda time %fs\n", duration);
    //release the device buffers owned by this call
    hipFree(d_candidate_query_points_indexes);
    hipFree(d_candidate_query_points_set);
    hipFree(d_data_set);
    hipFree(d_data_indexes);
    hipFree(d_dist_k_mins_global_dist);
    hipFree(d_idx_k_mins_global_tmp);
    return 0;
}
|
b2fabd9258c58903b7a7c308bc952e156038609c.cu
|
#define USE_DOUBLE 0
#if USE_DOUBLE > 0
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#define FLOAT_TYPE double
#define FLOAT_TYPE4 double4
#define MAX_FLOAT_TYPE 1.7976931348623158e+308
#define MIN_FLOAT_TYPE -1.7976931348623158e+308
#else
#define FLOAT_TYPE float
#define FLOAT_TYPE4 float4
#define FLOAT_TYPE8 float8
#define MAX_FLOAT_TYPE 3.402823466e+38f
#define MIN_FLOAT_TYPE -3.402823466e+38f
#endif
#define VECSIZE 4
#define VECSIZE_8 8
#define MAX_KNN 30
// System includes
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <time.h>
// CUDA runtime
#include <cuda_runtime.h>
//Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <device_launch_parameters.h>
typedef struct Convex_Node {
bool isLeaf;
int node_index;
int parent_index;
int leaf_index; //the leaf index of this node in all leaf nodes
int left_node;
int right_node;
} CONVEX_TREE;
/*----------public parameters used in __global__ functions-------------------*/
int *d_candidate_query_points_indexes = NULL;
FLOAT_TYPE *d_candidate_query_points_set=NULL;
int *d_candidate_query_points_appr_leaf_node_indexes = NULL;
FLOAT_TYPE *d_all_sorted_data_set=NULL;
int *d_sorted_data_set_indexes = NULL;
CONVEX_TREE *d_tree_struct = NULL;
FLOAT_TYPE *d_all_leaf_nodes_ALPHA_set=NULL;
FLOAT_TYPE *d_all_leaf_nodes_BETA_set=NULL;
int *d_all_constrains_num_of_each_leaf_nodes=NULL;
int *d_all_leaf_nodes_offsets_in_all_ALPHA=NULL;
int *d_all_leaf_nodes_ancestor_nodes_ids=NULL;
int *d_leaf_nodes_start_pos_in_sorted_data_set=NULL;
int *d_pts_num_in_sorted_leaf_nodes=NULL;
FLOAT_TYPE *d_dist_k_mins_global_tmp=NULL;
int *d_idx_k_mins_global_tmp=NULL;
long *d_dist_computation_times_arr=NULL;
long *d_quadprog_times_arr=NULL;
long *d_dist_computation_times_in_quadprog=NULL;
FLOAT_TYPE *d_nodes_centers=NULL;
/*----------public parameters used in __global__ functions-------------------*/
//free memory malloced in CUDA
void free_cuda_mem(){
if (d_candidate_query_points_indexes != NULL){
cudaFree(d_candidate_query_points_indexes);
d_candidate_query_points_indexes=NULL;
}
if (d_candidate_query_points_appr_leaf_node_indexes != NULL){
cudaFree(d_candidate_query_points_appr_leaf_node_indexes);
d_candidate_query_points_appr_leaf_node_indexes=NULL;
}
if (d_all_sorted_data_set != NULL){
cudaFree(d_all_sorted_data_set);
d_all_sorted_data_set=NULL;
}
if (d_sorted_data_set_indexes != NULL){
cudaFree(d_sorted_data_set_indexes);
d_sorted_data_set_indexes = NULL;
}
if (d_tree_struct != NULL){
cudaFree(d_tree_struct);
d_tree_struct = NULL;
}
if (d_all_leaf_nodes_ALPHA_set != NULL){
cudaFree(d_all_leaf_nodes_ALPHA_set);
d_all_leaf_nodes_ALPHA_set = NULL;
}
if (d_all_leaf_nodes_BETA_set != NULL){
cudaFree(d_all_leaf_nodes_BETA_set);
d_all_leaf_nodes_BETA_set = NULL;
}
if (d_all_constrains_num_of_each_leaf_nodes != NULL){
cudaFree(d_all_constrains_num_of_each_leaf_nodes);
d_all_constrains_num_of_each_leaf_nodes = NULL;
}
if (d_all_leaf_nodes_offsets_in_all_ALPHA != NULL){
cudaFree(d_all_leaf_nodes_offsets_in_all_ALPHA);
d_all_leaf_nodes_offsets_in_all_ALPHA = NULL;
}
if (d_all_leaf_nodes_ancestor_nodes_ids != NULL){
cudaFree(d_all_leaf_nodes_ancestor_nodes_ids);
d_all_leaf_nodes_ancestor_nodes_ids = NULL;
}
if (d_leaf_nodes_start_pos_in_sorted_data_set != NULL){
cudaFree(d_leaf_nodes_start_pos_in_sorted_data_set);
d_leaf_nodes_start_pos_in_sorted_data_set = NULL;
}
if (d_pts_num_in_sorted_leaf_nodes != NULL){
cudaFree(d_pts_num_in_sorted_leaf_nodes);
d_pts_num_in_sorted_leaf_nodes = NULL;
}
if (d_dist_k_mins_global_tmp != NULL){
cudaFree(d_dist_k_mins_global_tmp);
d_dist_k_mins_global_tmp = NULL;
}
if (d_idx_k_mins_global_tmp != NULL){
cudaFree(d_idx_k_mins_global_tmp);
d_idx_k_mins_global_tmp = NULL;
}
if (d_dist_computation_times_arr != NULL){
cudaFree(d_dist_computation_times_arr);
d_dist_computation_times_arr = NULL;
}
if (d_quadprog_times_arr != NULL){
cudaFree(d_quadprog_times_arr);
d_quadprog_times_arr = NULL;
}
if (d_dist_computation_times_in_quadprog != NULL){
cudaFree(d_dist_computation_times_in_quadprog);
d_dist_computation_times_in_quadprog = NULL;
}
if (d_nodes_centers != NULL){
cudaFree(d_nodes_centers);
d_nodes_centers = NULL;
}
}
//the inner product of q and p
__device__ FLOAT_TYPE scalar_product_cuda(FLOAT_TYPE* p, FLOAT_TYPE* q, int DIM){
//DIM will be written in "make_kernel_from_file"
FLOAT_TYPE result = 0;
for (int i = 0; i<DIM; i++){
result += p[i] * q[i];
}
return result;
}
__device__ FLOAT_TYPE float_dist_squre_cuda(FLOAT_TYPE *p, FLOAT_TYPE *q, int DIM){
FLOAT_TYPE dist_tmp=0, tmp=0;
for (int j = 0; j<DIM; j++){
tmp = (q[j] - p[j]);
dist_tmp += tmp*tmp;
}
return dist_tmp;
}
__device__ FLOAT_TYPE Compute_distance(FLOAT_TYPE *p1, FLOAT_TYPE *p2, int DIM){
FLOAT_TYPE sum = 0;
FLOAT_TYPE *end = p1 + DIM;
for (; p1 != end; p1++, p2++){
FLOAT_TYPE d1 = *p1 - *p2;
d1 *= d1;
sum = sum + d1;
}
return sqrt(sum);
}
//return an approximate (lower-bound) min dist from q to this convex node.
//d_min is still an approximation; it can be improved or tightened.
//idea: if a point q is outside of this node, then the maximum distance from
// q to the violated constraints (hyperplanes) is a valid lower bound on the true
// distance, because for every active hyperplane h we have
// d_min >= dist(q, h).
__device__ FLOAT_TYPE approximate_min_dist_by_hyper_plane_cuda( FLOAT_TYPE* query_point,
FLOAT_TYPE* ALPHA,
FLOAT_TYPE* BETA,
int ALPPHA_size,
int DIM){
FLOAT_TYPE result = 0;
FLOAT_TYPE tmp_val = 0;
for (int i = 0; i<ALPPHA_size; i++)
{
//DIM will be written in "make_kernel_from_file"
FLOAT_TYPE* alpha = ALPHA + i*DIM;
FLOAT_TYPE beta = BETA[i];
tmp_val = scalar_product_cuda(alpha, query_point,DIM);
        // if there exist alpha and beta such that alpha[i]' * point + beta[i] < 0,
        // then the point is not in the node
if (tmp_val<0){
if (result < -tmp_val){
result = -tmp_val;
}
}
}
return result;
}
//return true if the d_min from q to this node is larger than dist_compare.
//@param dist_compute_times_in_appr_quadprog: count the dist computation times here
__device__ bool is_appr_min_dist_from_q_larger_by_hyper_plane_cuda( FLOAT_TYPE *query_point,
FLOAT_TYPE *ALPHA,
FLOAT_TYPE *BETA,
int ALPPHA_size,
FLOAT_TYPE dist_compare,
long *dist_compute_times_in_appr_quadprog,
FLOAT_TYPE *query_point_scalar_product_from_all_nodes,
int *cur_ancestor_nodes_ids,
int DIM
)
{
bool result = false;
int tmp_times = 0;
int cur_ancestor_node_id = 0;
for (int i = 0; i<ALPPHA_size; i++){
FLOAT_TYPE tmp_dist = BETA[i];
        //---original scalar product; much duplication, but faster in low dimensions.
for (int j = 0; j<DIM; j++){
tmp_dist += ALPHA[i*DIM + j] * query_point[j];
}
tmp_times++;
if (tmp_dist<0){
if (dist_compare <= (tmp_dist*tmp_dist)){
//if there exists one such hyper plane then return.
result = true;
break;
}
}
}
*dist_compute_times_in_appr_quadprog += tmp_times;
return result;
}
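/*
  Why the squared comparison above is a valid pruning test (a sketch, assuming each alpha
  row is unit length, which the squared comparison implies): the leaf is contained in every
  half-space {x : alpha_i . x + beta_i >= 0}. If the query q violates constraint i, i.e.
      t_i = alpha_i . q + beta_i < 0,
  then every point p of the leaf satisfies
      dist(q, p) >= dist(q, {x : alpha_i . x + beta_i = 0}) = |t_i| / ||alpha_i|| = |t_i|,
  so dist(q, leaf)^2 >= t_i^2. Hence, once some t_i^2 reaches the current k-th smallest
  squared distance (dist_compare), the whole leaf can be skipped without computing any
  point-to-point distance.
*/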
//brute-force computation that updates dist_k_mins_private_tmp and idx_k_mins_private_tmp.
//pts_num: the number of points of the current leaf node within all_sorted_data_set.
__device__ void do_brute_force_and_update_private_cuda(
FLOAT_TYPE *cur_query_point,
int cur_query_point_index,
int pts_num,
int cur_leaf_node_start_pos,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
FLOAT_TYPE *dist_k_mins_private_tmp,
int *idx_k_mins_private_tmp,
int K_NN,
int DIM)
{
FLOAT_TYPE dist_squre_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
for (int i = 0; i<pts_num; i++){
dist_squre_tmp = float_dist_squre_cuda(all_sorted_data_set + (cur_leaf_node_start_pos + i)*DIM, cur_query_point,DIM);
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_private_tmp[K_NN - 1];
if (cur_k_min_dist_square> dist_squre_tmp){
//printf("update dist_k_mins_private_tmp...\n");
//printf("cur_k_min_dist_square=%f, dist_squre_tmp=%f \n",cur_k_min_dist_square,dist_squre_tmp );
int j = K_NN - 1;
dist_k_mins_private_tmp[j] = dist_squre_tmp;
int pts_idx = sorted_data_set_indexes[cur_leaf_node_start_pos + i];
idx_k_mins_private_tmp[j] = pts_idx;
for (; j>0; j--){
if (dist_k_mins_private_tmp[j - 1] > dist_k_mins_private_tmp[j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_private_tmp[j - 1];
dist_k_mins_private_tmp[j - 1] = dist_k_mins_private_tmp[j];
dist_k_mins_private_tmp[j] = tmp;
//swap indices
tmp_idx = idx_k_mins_private_tmp[j - 1];
idx_k_mins_private_tmp[j - 1] = idx_k_mins_private_tmp[j];
idx_k_mins_private_tmp[j] = tmp_idx;
}
else break;
}
}
}
}
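/*
  A minimal host-side reference of the same bounded insertion used above (hypothetical
  helper, not called by any kernel here): it keeps the K_NN smallest squared distances in
  ascending order (buffers pre-filled with MAX_FLOAT_TYPE / -1) and bubbles a new candidate
  into place, so CPU and GPU results can be compared directly.
*/
static void host_knn_insert_reference(FLOAT_TYPE *dist_k_mins, int *idx_k_mins,
                                      int K_NN, FLOAT_TYPE dist_square, int pts_idx)
{
    if (dist_square >= dist_k_mins[K_NN - 1]) return;   //not among the K nearest
    dist_k_mins[K_NN - 1] = dist_square;                //overwrite the current worst entry
    idx_k_mins[K_NN - 1] = pts_idx;
    for (int j = K_NN - 1; j > 0; j--) {                //bubble the new entry towards the front
        if (dist_k_mins[j - 1] > dist_k_mins[j]) {
            FLOAT_TYPE tmp = dist_k_mins[j - 1];
            dist_k_mins[j - 1] = dist_k_mins[j];
            dist_k_mins[j] = tmp;
            int tmp_idx = idx_k_mins[j - 1];
            idx_k_mins[j - 1] = idx_k_mins[j];
            idx_k_mins[j] = tmp_idx;
        }
        else break;
    }
}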
//brute-force computation that updates dist_k_mins_private_tmp and idx_k_mins_private_tmp,
//mapping each hit through remain_index back to its original data-set index.
//pts_num: the number of points of the current leaf node within all_sorted_data_set.
__device__ void new_do_brute_force_and_update_private_cuda(
FLOAT_TYPE *cur_query_point,
int cur_query_point_index,
int pts_num,
int cur_leaf_node_start_pos,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
FLOAT_TYPE *dist_k_mins_private_tmp,
int *idx_k_mins_private_tmp,
int *remain_index,
int K_NN,
int DIM)
{
FLOAT_TYPE dist_squre_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
for (int i = 0; i<pts_num; i++){
dist_squre_tmp = float_dist_squre_cuda(all_sorted_data_set + (cur_leaf_node_start_pos + i)*DIM, cur_query_point, DIM);
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_private_tmp[K_NN - 1];
if (cur_k_min_dist_square> dist_squre_tmp){
//printf("update dist_k_mins_private_tmp...\n");
//printf("cur_k_min_dist_square=%f, dist_squre_tmp=%f \n",cur_k_min_dist_square,dist_squre_tmp );
int j = K_NN - 1;
dist_k_mins_private_tmp[j] = dist_squre_tmp;
int pts_idx = sorted_data_set_indexes[cur_leaf_node_start_pos + i];
idx_k_mins_private_tmp[j] = remain_index[pts_idx];
for (; j>0; j--){
if (dist_k_mins_private_tmp[j - 1] > dist_k_mins_private_tmp[j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_private_tmp[j - 1];
dist_k_mins_private_tmp[j - 1] = dist_k_mins_private_tmp[j];
dist_k_mins_private_tmp[j] = tmp;
//swap indices
tmp_idx = idx_k_mins_private_tmp[j - 1];
idx_k_mins_private_tmp[j - 1] = idx_k_mins_private_tmp[j];
idx_k_mins_private_tmp[j] = tmp_idx;
}
else break;
}
}
}
}
/*
0 candidate_query_points_num : the number of current candidate query points, in the case of all query points set
is too large, we can submit subset of query sets to this kernel.
1 candidate_query_points_indexes : the indexes of current query points in all query points set
2 candidate_query_points_set : the current query points data set
3 candidate_query_points_appr_leaf_node_indexes : the approximate leaf node for candidate query points
4 all_sorted_data_set : all sorted data
5 sorted_data_set_indexes : all points indexes in sorted data set
6 tree_struct : the tree structure of the whole tree. It is not used now.
7 all_leaf_nodes_ALPHA_set : ALPHA set of all leaf nodes
8 leaf_nodes_BETA_set : BETA set of all leaf nodes
9 all_constrains_num_of_each_leaf_nodes         : all_constrains_num_of_each_leaf_nodes[i]=j means the i^th leaf node has j constraints, i.e. j alphas and betas
10 all_leaf_nodes_offsets_in_all_ALPHA : the offset of each leaf node in ALPHA
11 leaf_node_num : the number of leaf nodes
12 all_leaf_nodes_ancestor_nodes_ids : the ancestor nodes ids of each leaf nodes
13 leaf_nodes_start_pos_in_sorted_data_set      : the start position of each sorted leaf node within the sorted data set
14 pts_num_in_sorted_leaf_nodes : the length of points saved in each sorted leaf node
15 dist_k_mins_global_tmp : the K min-distance of all query points, the length of dist_mins_global_tmp is K* query_points_size
16 idx_k_mins_global_tmp : the indexes of the K nearest neighbors, the length of dist_mins_global_tmp is K* query_points_size
17 K_NN : the value of K
18 dist_computation_times_arr : dist_computation_times_arr[i] saves total distance computation times of the i^th point and
19 quadprog_times_arr : quadprog_times_arr[i] approximate quadprog times of the i^th point.
20 dist_computation_times_in_quadprog: dist_computation_times_in_quadprog[i] saves the total distance computation times
in quadprog of the i^th point.
*/
__global__ void do_finding_KNN_by_leaf_order_cuda(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int *candidate_query_points_appr_leaf_node_indexes,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM,
int loop_id)
{
//---global thread id
//int tid = blockIdx.x;
//int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.y + threadIdx.x;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = x + y * blockDim.x * gridDim.x;
//printf(" [loopid=%d, tid_ori=%d], ", loop_id,tid);
//tid +=loop_id* blocks_per_time;
if (tid >= candidate_query_points_num){
return;
}
//---count the distance computation times in approximate quadprog.
long cur_dist_compute_times_in_appr_quadprog = 0;
int cur_query_points_appr_leaf_node_indexes = candidate_query_points_appr_leaf_node_indexes[tid];
int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[cur_query_points_appr_leaf_node_indexes];
/*---------------------------------------------------------------------------------------------------------------
//---query_points_nodes_alpha_scalar_product is not used in is_appr_min_dist_from_q_larger_by_hyper_plane now.
//---because visiting query_points_nodes_alpha_scalar_product randomly seems slow.
//---private scalar product between current query point and all ALPHAs, which are all initialized to 0.
    //---each node has an alpha constraint, as well as the constraints of its ancestor nodes.
    //---'ALL_NODES_NUM' will be written before the kernel is created.
    ----------------------------------------------------------------------------------------------------------------*/
    /*-----------------Copy global data to local data: visiting global memory is relatively slow on the device-----------------------------*/
int quadprog_times_private = 0;
for (int j = 0; j < K_NN; j++){
dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = MAX_FLOAT_TYPE;
idx_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = -1;
}
//---here is tid instead of cur_query_point_index, tid is the offset of current query point in candidate_query_points_set
FLOAT_TYPE* cur_query_point = candidate_query_points_set + tid*DIM;
/*-----------------------------------------------------------------------------------------------------------------------------*/
long dist_computation_times_tmp = 0;
int pts_num = pts_num_in_sorted_leaf_nodes[cur_query_points_appr_leaf_node_indexes];
//---find approximate kNN in its approximate nodes.
do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
K_NN, DIM);
//---add distance computation times
//dist_computation_times_tmp += pts_num;
for (int i = 0; i < leaf_node_num; i++) {
if (i == cur_query_points_appr_leaf_node_indexes)
continue;
int alpha_offset = all_leaf_nodes_offsets_in_all_ALPHA[i];
int constrains_num = all_constrains_num_of_each_leaf_nodes[i];
//---get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN+K_NN - 1];
FLOAT_TYPE* cur_ALPHAT = all_leaf_nodes_ALPHA_set + alpha_offset*DIM;
FLOAT_TYPE* cur_BETA = all_leaf_nodes_BETA_set + alpha_offset;
//---the number of ancestor nodes is the same as the size of constraints
int* cur_ancestor_nodes_ids = all_leaf_nodes_ancestor_nodes_ids + alpha_offset;
//---check whether the current node is a candidate for current query point
if (!is_appr_min_dist_from_q_larger_by_hyper_plane_cuda(cur_query_point, cur_ALPHAT, cur_BETA,
constrains_num, cur_k_min_dist_square,
&cur_dist_compute_times_in_appr_quadprog,
NULL,
cur_ancestor_nodes_ids,
DIM
) )
{
//---do brute force distance computation here, and update dist_k_mins_global_tmp and idx_k_mins_global_tmp
//---get the number of points saved in current node
//---i is cur leaf node index, not leaf node ori_index
int pts_num = pts_num_in_sorted_leaf_nodes[i];
int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[i];
do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
K_NN, DIM);
}
}
}
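/*
  Layout sketch for the flattened per-leaf arrays documented above: the ALPHA/BETA offsets
  and the sorted-point start positions behave as exclusive prefix sums of the per-leaf
  counts. A hypothetical host-side construction (the real arrays are built elsewhere,
  outside this file) could look like this:
*/
static void build_leaf_offsets_sketch(const int *constraints_per_leaf,
                                      const int *points_per_leaf,
                                      int leaf_node_num,
                                      int *alpha_offsets,        //-> all_leaf_nodes_offsets_in_all_ALPHA
                                      int *start_pos_in_sorted)  //-> leaf_nodes_start_pos_in_sorted_data_set
{
    int alpha_acc = 0, pts_acc = 0;
    for (int i = 0; i < leaf_node_num; i++) {
        alpha_offsets[i] = alpha_acc;          //where leaf i's ALPHA rows (and BETA entries) start
        start_pos_in_sorted[i] = pts_acc;      //where leaf i's points start in the sorted data set
        alpha_acc += constraints_per_leaf[i];  //i.e. all_constrains_num_of_each_leaf_nodes[i]
        pts_acc   += points_per_leaf[i];       //i.e. pts_num_in_sorted_leaf_nodes[i]
    }
}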
/*
0 candidate_query_points_num : the number of current candidate query points, in the case of all query points set
is too large, we can submit subset of query sets to this kernel.
1 candidate_query_points_indexes : the indexes of current query points in all query points set
2 candidate_query_points_set : the current query points data set
3 candidate_query_points_appr_leaf_node_indexes : the approximate leaf node for candidate query points (not taken by this kernel variant)
4 all_sorted_data_set : all sorted data
5 sorted_data_set_indexes : all points indexes in sorted data set
6 tree_struct : the tree structure of the whole tree. It is not used now.
7 all_leaf_nodes_ALPHA_set : ALPHA set of all leaf nodes
8 leaf_nodes_BETA_set : BETA set of all leaf nodes
9 all_constrains_num_of_each_leaf_nodes         : all_constrains_num_of_each_leaf_nodes[i]=j means the i^th leaf node has j constraints, i.e. j alphas and betas
10 all_leaf_nodes_offsets_in_all_ALPHA : the offset of each leaf node in ALPHA
11 leaf_node_num : the number of leaf nodes
12 all_leaf_nodes_ancestor_nodes_ids : the ancestor nodes ids of each leaf nodes
13 leaf_nodes_start_pos_in_sorted_data_set      : the start position of each sorted leaf node within the sorted data set
14 pts_num_in_sorted_leaf_nodes : the length of points saved in each sorted leaf node
15 dist_k_mins_global_tmp : the K min-distance of all query points, the length of dist_mins_global_tmp is K* query_points_size
16 idx_k_mins_global_tmp : the indexes of the K nearest neighbors, the length of dist_mins_global_tmp is K* query_points_size
17 K_NN : the value of K
18 dist_computation_times_arr : dist_computation_times_arr[i] saves total distance computation times of the i^th point and
19 quadprog_times_arr : quadprog_times_arr[i] approximate quadprog times of the i^th point.
20 dist_computation_times_in_quadprog: dist_computation_times_in_quadprog[i] saves the total distance computation times
in quadprog of the i^th point.
*/
__global__ void new_do_finding_KNN_by_leaf_order_cuda(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int *remain_index,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM,
int loop_id)
{
//---global thread id
//int tid = blockIdx.x;
//int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.y + threadIdx.x;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = x + y * blockDim.x * gridDim.x;
//printf(" [loopid=%d, tid_ori=%d], ", loop_id,tid);
//tid +=loop_id* blocks_per_time;
if (tid >= candidate_query_points_num){
return;
}
//---count the distance computation times in approximate quadprog.
long cur_dist_compute_times_in_appr_quadprog = 0;
//int cur_query_points_appr_leaf_node_indexes = candidate_query_points_appr_leaf_node_indexes[tid];
//int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[cur_query_points_appr_leaf_node_indexes];
/*---------------------------------------------------------------------------------------------------------------
//---query_points_nodes_alpha_scalar_product is not used in is_appr_min_dist_from_q_larger_by_hyper_plane now.
//---because visiting query_points_nodes_alpha_scalar_product randomly seems slow.
//---private scalar product between current query point and all ALPHAs, which are all initialized to 0.
    //---each node has an alpha constraint, as well as the constraints of its ancestor nodes.
    //---'ALL_NODES_NUM' will be written before the kernel is created.
    ----------------------------------------------------------------------------------------------------------------*/
    /*-----------------Copy global data to local data: visiting global memory is relatively slow on the device-----------------------------*/
int quadprog_times_private = 0;
/*for (int j = 0; j < K_NN; j++){
dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = MAX_FLOAT_TYPE;
idx_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN + j] = -1;
}
*/
//---here is tid instead of cur_query_point_index, tid is the offset of current query point in candidate_query_points_set
FLOAT_TYPE* cur_query_point = candidate_query_points_set + tid*DIM;
/*-----------------------------------------------------------------------------------------------------------------------------*/
long dist_computation_times_tmp = 0;
//int pts_num = pts_num_in_sorted_leaf_nodes[cur_query_points_appr_leaf_node_indexes];
//---find approximate kNN in its approximate nodes.
/*do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
K_NN, DIM);
*/
//---add distance computation times
//dist_computation_times_tmp += pts_num;
for (int i = 0; i < leaf_node_num; i++) {
//if (i == cur_query_points_appr_leaf_node_indexes)
//continue;
int alpha_offset = all_leaf_nodes_offsets_in_all_ALPHA[i];
int constrains_num = all_constrains_num_of_each_leaf_nodes[i];
//---get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[candidate_query_points_indexes[tid]*K_NN+K_NN - 1];
FLOAT_TYPE* cur_ALPHAT = all_leaf_nodes_ALPHA_set + alpha_offset*DIM;
FLOAT_TYPE* cur_BETA = all_leaf_nodes_BETA_set + alpha_offset;
//---the number of ancestor nodes is the same as the size of constraints
int* cur_ancestor_nodes_ids = all_leaf_nodes_ancestor_nodes_ids + alpha_offset;
//---check whether the current node is a candidate for current query point
if (!is_appr_min_dist_from_q_larger_by_hyper_plane_cuda(cur_query_point, cur_ALPHAT, cur_BETA,
constrains_num, cur_k_min_dist_square,
&cur_dist_compute_times_in_appr_quadprog,
NULL,
cur_ancestor_nodes_ids,
DIM
) )
{
//---do brute force distance computation here, and update dist_k_mins_global_tmp and idx_k_mins_global_tmp
//---get the number of points saved in current node
//---i is cur leaf node index, not leaf node ori_index
int pts_num = pts_num_in_sorted_leaf_nodes[i];
int cur_leaf_node_start_pos = leaf_nodes_start_pos_in_sorted_data_set[i];
new_do_brute_force_and_update_private_cuda( cur_query_point, candidate_query_points_indexes[tid],
pts_num, cur_leaf_node_start_pos,
all_sorted_data_set, sorted_data_set_indexes,
dist_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
idx_k_mins_global_tmp+candidate_query_points_indexes[tid]*K_NN,
remain_index,K_NN, DIM);
}
}
}
__global__ void print_float_Data(FLOAT_TYPE *dist_k_mins_global_tmp, int* idx_k_mins_global_tmp, int loop_id){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int tid_ori=tid;
tid+= loop_id*1024;
int K_NN=30;
int cur_query_point_index=tid;
for (int j = 0; j<K_NN; j++){
printf(" [tid=%d, j=%d, dist =%f, idx=%d] ", tid, j, dist_k_mins_global_tmp[cur_query_point_index*K_NN + j], idx_k_mins_global_tmp[cur_query_point_index*K_NN + j]);
}
}
/*
//find kNN by brute force
0 data_set                 : the reference data set to search
1 data_set_size            : the number of points in data_set (its cardinality)
2 query_points : all query points
3 query_points_size : the length of query_points
4 dist_k_mins_global_tmp : the K min-distance of all query points,
the length of dist_mins_global_tmp is K* query_points_size
5 idx_k_mins_global_tmp : the indexes of the K nearest neighbors,
the length of dist_mins_global_tmp is K* query_points_size
6 K_NN : the value of K
*/
/*
__global__ void do_brute_force_KNN_cuda(
FLOAT_TYPE *data_set,int data_set_size,
FLOAT_TYPE *query_set,int query_set_size,
FLOAT_TYPE *KNN_index_with_dist,
int K,int DIM)
{
//global thread id
int tid = blockDim.x *blockIdx.x + threadIdx.x;
if (tid > query_set_size){
return;
}
unsigned int current_query_point_index = tid;
FLOAT_TYPE *temp = new FLOAT_TYPE[2 * K];
FLOAT_TYPE *p1, *p2;
int tmp;
FLOAT_TYPE d, max_dist, max_idx;
p1 = query_set + current_query_point_index*DIM;
for (int i = 0; i < data_set_size; i++){
p2 = data_set + i*DIM;
d = Compute_distance(p1, p2, DIM);
if (i < K){
temp[i * 2] = i;
temp[i * 2 + 1] = d;
}
else{
tmp = 0;
max_idx = temp[0];
max_dist = temp[1];
for (int j = 1; j < K; j++){
if (temp[2 * j + 1] > max_dist){
tmp = j;
max_idx = temp[2 * j];
max_dist = temp[2 * j + 1];
}
}
if (d < max_dist){
temp[tmp * 2] = i;
temp[tmp * 2 + 1] = d;
}
}
}
memcpy(KNN_index_with_dist + current_query_point_index * 2 * K, temp, (2 * K)*sizeof(FLOAT_TYPE));
//cudaMemcpy(KNN_index_with_dist + current_query_point_index * 2 * K, temp, (2 * K)*sizeof(FLOAT_TYPE), cudaMemcpyDeviceToDevice);
}
*/
__global__ void do_brute_force_KNN_cuda(
FLOAT_TYPE *data_set,
int data_set_size,
FLOAT_TYPE *query_points,
int query_points_size,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
// global thread id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= query_points_size){
return;
}
unsigned int current_query_point_index = tid;
//---init the distance as MAX_FLOAT_TYPE
for (int i = 0; i<K_NN; i++){
dist_k_mins_global_tmp[current_query_point_index*K_NN + i] = MAX_FLOAT_TYPE;
}
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
//if (tid==checkid)
// printf("cur_k_min_dist_square =%f \n",cur_k_min_dist_square);
FLOAT_TYPE dist_square_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
//local copy
FLOAT_TYPE* cur_query_point_private = new FLOAT_TYPE[DIM];
for (int i = 0; i<DIM; i++){
cur_query_point_private[i] = query_points[current_query_point_index*DIM + i];
}
for (int i = 0; i<data_set_size; i++){
dist_square_tmp = 0;
cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
for (int j = 0; j<DIM; j++){
tmp = data_set[i*DIM + j] - cur_query_point_private[j];
dist_square_tmp += tmp*tmp;
//printf("tmp =%f, dist_square_tmp=%f\n",tmp,dist_square_tmp);
}
//printf("dist_square_tmp =%f, cur_k_min_dist_square=%f \n",dist_square_tmp, cur_k_min_dist_square);
if (cur_k_min_dist_square> dist_square_tmp){
//printf("update dist_k_mins_global_tmp...\n");
int j = K_NN - 1;
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = dist_square_tmp;
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = i;
for (; j>0; j--){
if (dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] > dist_k_mins_global_tmp[current_query_point_index*K_NN + j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = dist_k_mins_global_tmp[current_query_point_index*K_NN + j];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp;
//swap indices
tmp_idx = idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = idx_k_mins_global_tmp[current_query_point_index*K_NN + j];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp_idx;
}
else break;
}
}
}
    delete[] cur_query_point_private;   //release the per-thread scratch buffer allocated with device-side new
}
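/*
  The kernel above allocates a per-thread scratch buffer with device-side "new", which is
  served from the CUDA device heap (8 MB by default). With many resident threads and a
  large DIM that heap can be exhausted, making "new" return NULL. A sketch of how a caller
  could enlarge the heap before the first launch (the 64 MB floor is an illustrative value,
  not taken from this code):
*/
static cudaError_t reserve_device_heap_for_query_buffers(size_t total_threads, size_t dim)
{
    size_t needed = total_threads * dim * sizeof(FLOAT_TYPE);
    size_t floor_bytes = (size_t)64 << 20;                       //64 MB illustrative floor
    size_t heap_bytes = needed > floor_bytes ? needed : floor_bytes;
    return cudaDeviceSetLimit(cudaLimitMallocHeapSize, heap_bytes);
}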
/*
//find kNN by brute force for outliers
0 data_set                 : the reference data set to search
1 data_set_size            : the number of points in data_set (its cardinality)
2 query_points : all query points
3 query_points_size : the length of query_points
4 dist_k_mins_global_tmp : the K min-distance of all query points,
the length of dist_mins_global_tmp is K* query_points_size
5 idx_k_mins_global_tmp : the indexes of the K nearest neighbors,
the length of dist_mins_global_tmp is K* query_points_size
6 K_NN : the value of K
*/
__global__ void do_brute_force_KNN_for_outliers_cuda(
FLOAT_TYPE *data_set,
int data_set_size,
int *data_indexes,
FLOAT_TYPE *query_points,
int query_points_size,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
// global thread id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int tid = threadIdx.x;
//int tid = 0;
//printf("tid =%d \n",tid);
if (tid >= query_points_size){
return;
}
//printf("tid=%d, data_set_size =%d,query_points_size=%d \n",tid,data_set_size,query_points_size);
unsigned int current_query_point_index = tid;
//get the current k^th min_dist_square of current query point
FLOAT_TYPE cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
//if (tid==checkid)
// printf("cur_k_min_dist_square =%f \n",cur_k_min_dist_square);
FLOAT_TYPE dist_square_tmp = 0;
FLOAT_TYPE tmp = 0;
int tmp_idx = 0;
//local copy
FLOAT_TYPE* cur_query_point_private = new FLOAT_TYPE[DIM];
for (int i = 0; i<DIM; i++){
cur_query_point_private[i] = query_points[current_query_point_index*DIM + i];
}
for (int i = 0; i < data_set_size; i++){
dist_square_tmp = 0;
cur_k_min_dist_square = dist_k_mins_global_tmp[current_query_point_index*K_NN + K_NN - 1];
for (int j = 0; j<DIM; j++){
tmp = data_set[i*DIM + j] - cur_query_point_private[j];
dist_square_tmp += tmp*tmp;
//printf("tmp =%f, dist_square_tmp=%f\n",tmp,dist_square_tmp);
}
//printf("dist_square_tmp =%f, cur_k_min_dist_square=%f \n",dist_square_tmp, cur_k_min_dist_square);
if (cur_k_min_dist_square> dist_square_tmp){
//printf("update dist_k_mins_global_tmp...\n");
int j = K_NN - 1;
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = dist_square_tmp;
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = data_indexes[i];
for (; j>0; j--){
if (dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] > dist_k_mins_global_tmp[current_query_point_index*K_NN + j]){
//printf("new nn found, swap...");
tmp = dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = dist_k_mins_global_tmp[current_query_point_index*K_NN + j];
dist_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp;
//swap indices
tmp_idx = idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j - 1] = idx_k_mins_global_tmp[current_query_point_index*K_NN + j];
idx_k_mins_global_tmp[current_query_point_index*K_NN + j] = tmp_idx;
}
else break;
}
}
}
    delete[] cur_query_point_private;   //release the per-thread scratch buffer allocated with device-side new
}
__device__ int get_min_k_index_cuda(FLOAT_TYPE *dist_k_mins_private_tmp, int K_NN){
FLOAT_TYPE tmp_max = dist_k_mins_private_tmp[0];
int result = 0;
for (int i = 1; i<K_NN; i++){
if (dist_k_mins_private_tmp[i]>tmp_max){
result = i;
tmp_max = dist_k_mins_private_tmp[i];
}
}
return result;
}
bool init_CUDA_device(){
int count;
cudaGetDeviceCount(&count);
//printf("\nDevice number: %d\n", count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for (i = 0; i < count; i++) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
//printf("Device name :%s\n", prop.name);
//printf("Device major :%d\n", prop.major);
//printf("Device multiProcessorCount :%d\n", prop.multiProcessorCount);
//printf("Device maxThreadsPerBlock :%d\n", prop.maxThreadsPerBlock);
//printf("Device totalGlobalMem :%ld\n", prop.totalGlobalMem);
//printf("Device totalConstMem :%ld\n", prop.totalConstMem);
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);
return true;
}
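/*
  Most of the cudaMalloc/cudaMemcpy return codes below are assigned to 'err' but never
  inspected. A small checking helper such as this sketch (a common CUDA idiom, not part of
  the original interface) can be wrapped around those calls while debugging, e.g.
  CUDA_CHECK_SKETCH(cudaMalloc((void **)&ptr, bytes));
*/
#define CUDA_CHECK_SKETCH(call)                                                  \
    do {                                                                         \
        cudaError_t cuda_check_err_ = (call);                                    \
        if (cuda_check_err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                          \
                    cudaGetErrorString(cuda_check_err_), __FILE__, __LINE__);    \
        }                                                                        \
    } while (0)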
/**
* Host main routine
*/
extern "C" int call_cuda_kernel(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int *candidate_query_points_appr_leaf_node_indexes,
int sorted_data_len,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
int all_leaf_nodes_constraint_num,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM)
{
clock_t start, finish,start1, finish1;
float duration ;
bool cuda_init =init_CUDA_device();
if (cuda_init){
printf("\nsucced for initializing CUDA");
}
cudaError_t err = cudaSuccess;
//Launch the Vector Add CUDA Kernel
printf("\nCUDA Malloc Memory.....\n ");
start=clock();
size_t size = candidate_query_points_num * sizeof(int);
if (d_candidate_query_points_indexes==NULL){
err = cudaMalloc((void **)&d_candidate_query_points_indexes, size);
err = cudaMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, cudaMemcpyHostToDevice);
}
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
if (d_candidate_query_points_set==NULL){
err = cudaMalloc((void **)&d_candidate_query_points_set, size);
err = cudaMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, cudaMemcpyHostToDevice);
}
size = candidate_query_points_num * sizeof(int);
if (d_candidate_query_points_appr_leaf_node_indexes==NULL){
err = cudaMalloc((void **)&d_candidate_query_points_appr_leaf_node_indexes, size);
err = cudaMemcpy(d_candidate_query_points_appr_leaf_node_indexes, candidate_query_points_appr_leaf_node_indexes, size, cudaMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(FLOAT_TYPE)*DIM;
if (d_all_sorted_data_set==NULL){
err = cudaMalloc((void **)&d_all_sorted_data_set, size);
err = cudaMemcpy(d_all_sorted_data_set, all_sorted_data_set, size, cudaMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(int);
if (d_sorted_data_set_indexes==NULL){
err = cudaMalloc((void **)&d_sorted_data_set_indexes, size);
err = cudaMemcpy(d_sorted_data_set_indexes, sorted_data_set_indexes, size, cudaMemcpyHostToDevice);
}
size=tree_nodes_num*sizeof(CONVEX_TREE);
if (d_tree_struct==NULL){
err = cudaMalloc((void **)&d_tree_struct, size);
err = cudaMemcpy(d_tree_struct, tree_struct, size, cudaMemcpyHostToDevice);
}
size= all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE)*DIM;
if (d_all_leaf_nodes_ALPHA_set==NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_ALPHA_set, size);
err = cudaMemcpy(d_all_leaf_nodes_ALPHA_set, all_leaf_nodes_ALPHA_set, size, cudaMemcpyHostToDevice);
}
size= all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE);
if (d_all_leaf_nodes_BETA_set==NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_BETA_set, size);
err = cudaMemcpy(d_all_leaf_nodes_BETA_set, all_leaf_nodes_BETA_set, size, cudaMemcpyHostToDevice);
}
size= leaf_node_num*sizeof(int);
if (d_all_constrains_num_of_each_leaf_nodes==NULL){
err = cudaMalloc((void **)&d_all_constrains_num_of_each_leaf_nodes, size);
err = cudaMemcpy(d_all_constrains_num_of_each_leaf_nodes, all_constrains_num_of_each_leaf_nodes, size, cudaMemcpyHostToDevice);
}
if (d_all_leaf_nodes_offsets_in_all_ALPHA==NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_offsets_in_all_ALPHA, size);
err = cudaMemcpy(d_all_leaf_nodes_offsets_in_all_ALPHA, all_leaf_nodes_offsets_in_all_ALPHA, size, cudaMemcpyHostToDevice);
}
if (d_all_leaf_nodes_ancestor_nodes_ids==NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_ancestor_nodes_ids, size);
err = cudaMemcpy(d_all_leaf_nodes_ancestor_nodes_ids, all_leaf_nodes_ancestor_nodes_ids, size, cudaMemcpyHostToDevice);
}
if (d_leaf_nodes_start_pos_in_sorted_data_set==NULL){
err = cudaMalloc((void **)&d_leaf_nodes_start_pos_in_sorted_data_set, size);
err = cudaMemcpy(d_leaf_nodes_start_pos_in_sorted_data_set, leaf_nodes_start_pos_in_sorted_data_set, size, cudaMemcpyHostToDevice);
}
if (d_pts_num_in_sorted_leaf_nodes==NULL){
err = cudaMalloc((void **)&d_pts_num_in_sorted_leaf_nodes, size);
err = cudaMemcpy(d_pts_num_in_sorted_leaf_nodes, pts_num_in_sorted_leaf_nodes, size, cudaMemcpyHostToDevice);
}
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
if (d_dist_k_mins_global_tmp==NULL){
err = cudaMalloc((void **)&d_dist_k_mins_global_tmp, size);
}
size= candidate_query_points_num* sizeof(int)* K_NN;
if (d_idx_k_mins_global_tmp==NULL){
err = cudaMalloc((void **)&d_idx_k_mins_global_tmp, size);
}
size= candidate_query_points_num*sizeof(long);
err = cudaMalloc((void **)&d_dist_computation_times_arr, size);
err = cudaMalloc((void **)&d_quadprog_times_arr, size);
err = cudaMalloc((void **)&d_dist_computation_times_in_quadprog, size);
int task_per_num=100000/256;
finish=clock();
duration = (float)(finish-start)/ CLOCKS_PER_SEC;
//printf( "\n CUDA Malloc Memory Time %f\n", duration);
//printf ("\n calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num,1), threads(1024,1);
//int blocks_per_time=6*16*128*8;
start1 = clock();
do_finding_KNN_by_leaf_order_cuda <<< blocks, threads>>>(
candidate_query_points_num,
d_candidate_query_points_indexes,
d_candidate_query_points_set,
d_candidate_query_points_appr_leaf_node_indexes,
d_all_sorted_data_set,
d_sorted_data_set_indexes,
d_tree_struct,
d_all_leaf_nodes_ALPHA_set,
d_all_leaf_nodes_BETA_set,
d_all_constrains_num_of_each_leaf_nodes,
d_all_leaf_nodes_offsets_in_all_ALPHA,
leaf_node_num,
d_all_leaf_nodes_ancestor_nodes_ids,
d_leaf_nodes_start_pos_in_sorted_data_set,
d_pts_num_in_sorted_leaf_nodes,
d_dist_k_mins_global_tmp,
d_idx_k_mins_global_tmp,
K_NN,
d_dist_computation_times_arr,
d_quadprog_times_arr,
d_dist_computation_times_in_quadprog,
NODES_NUM,
DIM,
1);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (float)(finish1-start1)/ CLOCKS_PER_SEC;
//printf( "\n performing do_finding_KNN_by_leaf_order_cuda time %f milsec %f s\n",(float)(finish1-start1), duration);
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
err = cudaMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
size= candidate_query_points_num* sizeof(int)* K_NN;
err = cudaMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
free_cuda_mem();
return 0;
}
/**
* Host main routine
*/
extern "C" int new_call_cuda_kernel(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int sorted_data_len,
FLOAT_TYPE *all_sorted_data_set,
int *sorted_data_set_indexes,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
int all_leaf_nodes_constraint_num,
FLOAT_TYPE *all_leaf_nodes_ALPHA_set,
FLOAT_TYPE *all_leaf_nodes_BETA_set,
int *all_constrains_num_of_each_leaf_nodes,
int *all_leaf_nodes_offsets_in_all_ALPHA,
int leaf_node_num,
int *all_leaf_nodes_ancestor_nodes_ids,
int *leaf_nodes_start_pos_in_sorted_data_set,
int *pts_num_in_sorted_leaf_nodes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int *remain_index,
int K_NN,
long *dist_computation_times_arr,
long *quadprog_times_arr,
long *dist_computation_times_in_quadprog,
int NODES_NUM,
int DIM)
{
clock_t start, finish, start1, finish1;
float duration;
bool cuda_init = init_CUDA_device();
if (cuda_init){
printf("\nsucced for initializing CUDA");
}
cudaError_t err = cudaSuccess;
//Launch the Vector Add CUDA Kernel
printf("\nCUDA Malloc Memory.....\n ");
start = clock();
size_t size = candidate_query_points_num * sizeof(int);
if (d_candidate_query_points_indexes == NULL){
err = cudaMalloc((void **)&d_candidate_query_points_indexes, size);
err = cudaMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, cudaMemcpyHostToDevice);
}
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
if (d_candidate_query_points_set == NULL){
err = cudaMalloc((void **)&d_candidate_query_points_set, size);
err = cudaMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, cudaMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(FLOAT_TYPE)*DIM;
if (d_all_sorted_data_set == NULL){
err = cudaMalloc((void **)&d_all_sorted_data_set, size);
err = cudaMemcpy(d_all_sorted_data_set, all_sorted_data_set, size, cudaMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(int);
if (d_sorted_data_set_indexes == NULL){
err = cudaMalloc((void **)&d_sorted_data_set_indexes, size);
err = cudaMemcpy(d_sorted_data_set_indexes, sorted_data_set_indexes, size, cudaMemcpyHostToDevice);
}
size = tree_nodes_num*sizeof(CONVEX_TREE);
if (d_tree_struct == NULL){
err = cudaMalloc((void **)&d_tree_struct, size);
err = cudaMemcpy(d_tree_struct, tree_struct, size, cudaMemcpyHostToDevice);
}
size = all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE)*DIM;
if (d_all_leaf_nodes_ALPHA_set == NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_ALPHA_set, size);
err = cudaMemcpy(d_all_leaf_nodes_ALPHA_set, all_leaf_nodes_ALPHA_set, size, cudaMemcpyHostToDevice);
}
size = all_leaf_nodes_constraint_num* sizeof(FLOAT_TYPE);
if (d_all_leaf_nodes_BETA_set == NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_BETA_set, size);
err = cudaMemcpy(d_all_leaf_nodes_BETA_set, all_leaf_nodes_BETA_set, size, cudaMemcpyHostToDevice);
}
size = leaf_node_num*sizeof(int);
if (d_all_constrains_num_of_each_leaf_nodes == NULL){
err = cudaMalloc((void **)&d_all_constrains_num_of_each_leaf_nodes, size);
err = cudaMemcpy(d_all_constrains_num_of_each_leaf_nodes, all_constrains_num_of_each_leaf_nodes, size, cudaMemcpyHostToDevice);
}
if (d_all_leaf_nodes_offsets_in_all_ALPHA == NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_offsets_in_all_ALPHA, size);
err = cudaMemcpy(d_all_leaf_nodes_offsets_in_all_ALPHA, all_leaf_nodes_offsets_in_all_ALPHA, size, cudaMemcpyHostToDevice);
}
if (d_all_leaf_nodes_ancestor_nodes_ids == NULL){
err = cudaMalloc((void **)&d_all_leaf_nodes_ancestor_nodes_ids, size);
err = cudaMemcpy(d_all_leaf_nodes_ancestor_nodes_ids, all_leaf_nodes_ancestor_nodes_ids, size, cudaMemcpyHostToDevice);
}
if (d_leaf_nodes_start_pos_in_sorted_data_set == NULL){
err = cudaMalloc((void **)&d_leaf_nodes_start_pos_in_sorted_data_set, size);
err = cudaMemcpy(d_leaf_nodes_start_pos_in_sorted_data_set, leaf_nodes_start_pos_in_sorted_data_set, size, cudaMemcpyHostToDevice);
}
if (d_pts_num_in_sorted_leaf_nodes == NULL){
err = cudaMalloc((void **)&d_pts_num_in_sorted_leaf_nodes, size);
err = cudaMemcpy(d_pts_num_in_sorted_leaf_nodes, pts_num_in_sorted_leaf_nodes, size, cudaMemcpyHostToDevice);
}
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
if (d_dist_k_mins_global_tmp == NULL){
err = cudaMalloc((void **)&d_dist_k_mins_global_tmp, size);
err = cudaMemcpy(d_dist_k_mins_global_tmp, dist_k_mins_global_tmp, size, cudaMemcpyHostToDevice);
}
size = candidate_query_points_num* sizeof(int)* K_NN;
if (d_idx_k_mins_global_tmp == NULL){
err = cudaMalloc((void **)&d_idx_k_mins_global_tmp, size);
err = cudaMemcpy(d_idx_k_mins_global_tmp, idx_k_mins_global_tmp, size, cudaMemcpyHostToDevice);
}
size = sorted_data_len * sizeof(int);
int* d_remain_index = NULL;
err = cudaMalloc((void **)&d_remain_index, size);
err = cudaMemcpy(d_remain_index, remain_index, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num*sizeof(long);
err = cudaMalloc((void **)&d_dist_computation_times_arr, size);
err = cudaMalloc((void **)&d_quadprog_times_arr, size);
err = cudaMalloc((void **)&d_dist_computation_times_in_quadprog, size);
int task_per_num = 100000 / 256;
finish = clock();
duration = (float)(finish - start) / CLOCKS_PER_SEC;
//printf( "\n CUDA Malloc Memory Time %f\n", duration);
//printf ("\n calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num, 1), threads(1024, 1);
//int blocks_per_time = 6 * 16 * 128 * 8;
start1 = clock();
new_do_finding_KNN_by_leaf_order_cuda << < blocks, threads >> >(
candidate_query_points_num,
d_candidate_query_points_indexes,
d_candidate_query_points_set,
d_all_sorted_data_set,
d_sorted_data_set_indexes,
d_tree_struct,
d_all_leaf_nodes_ALPHA_set,
d_all_leaf_nodes_BETA_set,
d_all_constrains_num_of_each_leaf_nodes,
d_all_leaf_nodes_offsets_in_all_ALPHA,
leaf_node_num,
d_all_leaf_nodes_ancestor_nodes_ids,
d_leaf_nodes_start_pos_in_sorted_data_set,
d_pts_num_in_sorted_leaf_nodes,
d_dist_k_mins_global_tmp,
d_idx_k_mins_global_tmp,
d_remain_index,
K_NN,
d_dist_computation_times_arr,
d_quadprog_times_arr,
d_dist_computation_times_in_quadprog,
NODES_NUM,
DIM,
1);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (float)(finish1 - start1) / CLOCKS_PER_SEC;
//printf( "\n performing do_finding_KNN_by_leaf_order_cuda time %f milsec %f s\n",(float)(finish1-start1), duration);
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
err = cudaMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
size = candidate_query_points_num* sizeof(int)* K_NN;
err = cudaMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
    cudaFree(d_remain_index);   //local to this call and not tracked by free_cuda_mem()
    free_cuda_mem();
return 0;
}
__global__ void kernel_do_find_approximate_nodes(
int candidate_query_points_num,
FLOAT_TYPE *candidate_query_points_set,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *nodes_centers,
int *appr_leaf_node_indexes,
int DIM
){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= candidate_query_points_num) return;
FLOAT_TYPE *q = candidate_query_points_set + tid * DIM;
int cur_node_index=0;
while (tree_struct[cur_node_index].isLeaf == false){
FLOAT_TYPE left_dist_squre = float_dist_squre_cuda(q, nodes_centers+tree_struct[cur_node_index].left_node*DIM,DIM);
FLOAT_TYPE right_dist_squre = float_dist_squre_cuda(q, nodes_centers+tree_struct[cur_node_index].right_node*DIM ,DIM);
//count the distance computation times.
if (left_dist_squre >= right_dist_squre){
cur_node_index = tree_struct[cur_node_index].right_node;
}
else{
cur_node_index = tree_struct[cur_node_index].left_node;
}
}
appr_leaf_node_indexes[tid] = tree_struct[cur_node_index].leaf_index;
}
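/*
  Host-side sketch of the same greedy descent (hypothetical reference, not used by the
  wrapper below): starting at the root, step to the child whose center is closer to q
  until a leaf is reached, then return that leaf's index among all leaves.
*/
static int host_find_approximate_leaf_reference(const FLOAT_TYPE *q,
                                                const CONVEX_TREE *tree_struct,
                                                const FLOAT_TYPE *nodes_centers,
                                                int DIM)
{
    int cur = 0;                                    //root node
    while (!tree_struct[cur].isLeaf) {
        const FLOAT_TYPE *lc = nodes_centers + tree_struct[cur].left_node * DIM;
        const FLOAT_TYPE *rc = nodes_centers + tree_struct[cur].right_node * DIM;
        FLOAT_TYPE dl = 0, dr = 0;
        for (int j = 0; j < DIM; j++) {             //squared distances to both child centers
            FLOAT_TYPE tl = q[j] - lc[j];
            FLOAT_TYPE tr = q[j] - rc[j];
            dl += tl * tl;
            dr += tr * tr;
        }
        cur = (dl >= dr) ? tree_struct[cur].right_node : tree_struct[cur].left_node;
    }
    return tree_struct[cur].leaf_index;
}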
extern "C" void call_do_find_approximate_nodes(
int candidate_query_points_num,
FLOAT_TYPE *candidate_query_points_set,
int tree_nodes_num,
CONVEX_TREE *tree_struct,
FLOAT_TYPE *nodes_centers,
int *candidate_query_points_appr_leaf_node_indexes,
int DIM
){
size_t size = candidate_query_points_num * sizeof(int);
cudaError_t err = cudaSuccess;
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
err = cudaMalloc((void **)&d_candidate_query_points_set, size);
err = cudaMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, cudaMemcpyHostToDevice);
size = tree_nodes_num * sizeof(CONVEX_TREE);
err = cudaMalloc((void **)&d_tree_struct, size);
err = cudaMemcpy(d_tree_struct, tree_struct, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num * sizeof(int);
err = cudaMalloc((void **)&d_candidate_query_points_appr_leaf_node_indexes, size);
size= tree_nodes_num*sizeof(FLOAT_TYPE)*DIM;
err = cudaMalloc((void **)&d_nodes_centers, size);
err = cudaMemcpy(d_nodes_centers, nodes_centers, size, cudaMemcpyHostToDevice);
int block_num =candidate_query_points_num/1024 +1;
dim3 blocks(block_num,1), threads(1024,1);
kernel_do_find_approximate_nodes<<<blocks, threads>>>( candidate_query_points_num,
d_candidate_query_points_set,
tree_nodes_num,
d_tree_struct,
d_nodes_centers,
d_candidate_query_points_appr_leaf_node_indexes,
DIM);
err = cudaGetLastError();
if (err != cudaSuccess){
printf("Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
}
size = candidate_query_points_num * sizeof(int);
err = cudaMemcpy(candidate_query_points_appr_leaf_node_indexes, d_candidate_query_points_appr_leaf_node_indexes, size, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess){
printf("Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
}
}
/**
* Host main routine
*/
/*
extern "C" int call_cuda_kernel_brute_force_and_update(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int data_set_size,
FLOAT_TYPE *data_set,
FLOAT_TYPE *KNN_index_with_dist,
int K_NN,
int DIM)
{
clock_t start, finish, start1, finish1;
bool cuda_init = init_CUDA_device();
if (cuda_init){
printf("succed for initializing CUDA\n");
}
cudaError_t err = cudaSuccess;
//Launch the Vector Add CUDA Kernel
printf("CUDA Malloc Memory.....\n");
start = clock();
size_t size = candidate_query_points_num * sizeof(int);
int *d_candidate_query_points_indexes = NULL;
err = cudaMalloc((void **)&d_candidate_query_points_indexes, size);
err = cudaMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_candidate_query_points_set = NULL;
err = cudaMalloc((void **)&d_candidate_query_points_set, size);
err = cudaMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, cudaMemcpyHostToDevice);
size = data_set_size * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_data_set = NULL;
err = cudaMalloc((void **)&d_data_set, size);
err = cudaMemcpy(d_data_set, data_set, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN * 2;
FLOAT_TYPE *d_KNN_index_with_dist = NULL;
err = cudaMalloc((void **)&d_KNN_index_with_dist, size);
finish = clock();
double duration = (double)(finish - start) / CLOCKS_PER_SEC;
printf("CUDA Malloc Memory Time %fs\n", duration);
printf("calling do_finding_KNN_by_leaf_order_cuda.....\n");
dim3 grids(2, 1), blocks(6, 1), threads(1024, 1);
start1 = clock();
do_brute_force_KNN_cuda << < blocks, threads >> >
(d_data_set, data_set_size, d_candidate_query_points_set, candidate_query_points_num,
d_KNN_index_with_dist, K_NN, DIM);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN * 2;
err = cudaMemcpy(KNN_index_with_dist, d_KNN_index_with_dist, size, cudaMemcpyDeviceToHost);
//size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
//err = cudaMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
//size= candidate_query_points_num* sizeof(int)* K_NN;
//err = cudaMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (double)(finish1 - start1) / CLOCKS_PER_SEC;
printf("performing do_finding_KNN_by_leaf_order_cuda time %fs\n", duration);
return 0;
}
*/
/**
* Host main routine
*/
extern "C" int call_cuda_kernel_brute_force(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int data_set_size,
FLOAT_TYPE *data_set,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
clock_t start, finish,start1, finish1;
bool cuda_init =init_CUDA_device();
if (cuda_init){
printf("succed for initializing CUDA\n");
}
cudaError_t err = cudaSuccess;
//Launch the Vector Add CUDA Kernel
printf("CUDA Malloc Memory.....\n");
start=clock();
size_t size = candidate_query_points_num * sizeof(int);
int *d_candidate_query_points_indexes = NULL;
err = cudaMalloc((void **)&d_candidate_query_points_indexes, size);
err = cudaMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, cudaMemcpyHostToDevice);
for (int i = 0; i < 100; i++){
for (int j = 0; j < DIM; j++){
printf("%.1f ", candidate_query_points_set[i*DIM + j]);
}
printf("\n");
}
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_candidate_query_points_set=NULL;
err = cudaMalloc((void **)&d_candidate_query_points_set, size);
err = cudaMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, cudaMemcpyHostToDevice);
size = data_set_size * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_data_set=NULL;
err = cudaMalloc((void **)&d_data_set, size);
err = cudaMemcpy(d_data_set, data_set, size, cudaMemcpyHostToDevice);
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
FLOAT_TYPE *d_dist_k_mins_global_dist = NULL;
err = cudaMalloc((void **)&d_dist_k_mins_global_dist, size);
size= candidate_query_points_num* sizeof(int)* K_NN;
int *d_idx_k_mins_global_tmp=NULL;
err = cudaMalloc((void **)&d_idx_k_mins_global_tmp, size);
finish=clock();
double duration = (double)(finish-start)/ CLOCKS_PER_SEC;
printf( "CUDA Malloc Memory Time %fs\n", duration);
printf ("calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num, 1), threads(1024, 1);
start1=clock();
do_brute_force_KNN_cuda<<< blocks, threads>>>
(d_data_set, data_set_size, d_candidate_query_points_set, candidate_query_points_num,
d_dist_k_mins_global_dist, d_idx_k_mins_global_tmp, K_NN, DIM);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
printf("ending......\n");
size= candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
err = cudaMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_dist, size, cudaMemcpyDeviceToHost);
size= candidate_query_points_num* sizeof(int)* K_NN;
err = cudaMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1=clock();
duration = (double)(finish1-start1)/ CLOCKS_PER_SEC;
printf( "performing do_finding_KNN_by_leaf_order_cuda time %fs\n", duration);
return 0;
}
/**
* Host main routine
*/
extern "C" int call_cuda_kernel_brute_force_for_outliers(
int candidate_query_points_num,
int *candidate_query_points_indexes,
FLOAT_TYPE *candidate_query_points_set,
int data_set_size,
FLOAT_TYPE *data_set,
int *data_indexes,
FLOAT_TYPE *dist_k_mins_global_tmp,
int *idx_k_mins_global_tmp,
int K_NN,
int DIM)
{
clock_t start, finish, start1, finish1;
bool cuda_init = init_CUDA_device();
/*
if (cuda_init){
printf("succed for initializing CUDA\n");
}
*/
cudaError_t err = cudaSuccess;
//Launch the Vector Add CUDA Kernel
//printf("CUDA Malloc Memory.....\n");
start = clock();
size_t size = candidate_query_points_num * sizeof(int);
int *d_candidate_query_points_indexes = NULL;
err = cudaMalloc((void **)&d_candidate_query_points_indexes, size);
err = cudaMemcpy(d_candidate_query_points_indexes, candidate_query_points_indexes, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_candidate_query_points_set = NULL;
err = cudaMalloc((void **)&d_candidate_query_points_set, size);
err = cudaMemcpy(d_candidate_query_points_set, candidate_query_points_set, size, cudaMemcpyHostToDevice);
size = data_set_size * sizeof(FLOAT_TYPE)*DIM;
FLOAT_TYPE *d_data_set = NULL;
err = cudaMalloc((void **)&d_data_set, size);
err = cudaMemcpy(d_data_set, data_set, size, cudaMemcpyHostToDevice);
size = data_set_size * sizeof(int);
int *d_data_indexes = NULL;
err = cudaMalloc((void **)&d_data_indexes, size);
err = cudaMemcpy(d_data_indexes, data_indexes, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
FLOAT_TYPE *d_dist_k_mins_global_dist = NULL;
err = cudaMalloc((void **)&d_dist_k_mins_global_dist, size);
err = cudaMemcpy(d_dist_k_mins_global_dist, dist_k_mins_global_tmp, size, cudaMemcpyHostToDevice);
size = candidate_query_points_num* sizeof(int)* K_NN;
int *d_idx_k_mins_global_tmp = NULL;
err = cudaMalloc((void **)&d_idx_k_mins_global_tmp, size);
err = cudaMemcpy(d_idx_k_mins_global_tmp, idx_k_mins_global_tmp, size, cudaMemcpyHostToDevice);
finish = clock();
double duration = (double)(finish - start) / CLOCKS_PER_SEC;
//printf("CUDA Malloc Memory Time %fs\n", duration);
//printf("calling do_finding_KNN_by_leaf_order_cuda.....\n");
int block_num = candidate_query_points_num / 1024 + 1;
dim3 blocks(block_num, 1), threads(1024, 1);
start1 = clock();
do_brute_force_KNN_for_outliers_cuda << < blocks, threads >> >
(d_data_set, data_set_size, d_data_indexes, d_candidate_query_points_set, candidate_query_points_num,
d_dist_k_mins_global_dist, d_idx_k_mins_global_tmp, K_NN, DIM);
err = cudaGetLastError();
if (err != cudaSuccess)
{
//fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
//printf( "----print device matrix-------");
//print_float_Data <<<4, 256>>>(d_dist_k_mins_global_tmp,d_idx_k_mins_global_tmp,0);
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("\n copy data from GPU....\n");
size = candidate_query_points_num* sizeof(FLOAT_TYPE)* K_NN;
err = cudaMemcpy(dist_k_mins_global_tmp, d_dist_k_mins_global_dist, size, cudaMemcpyDeviceToHost);
size = candidate_query_points_num* sizeof(int)* K_NN;
err = cudaMemcpy(idx_k_mins_global_tmp, d_idx_k_mins_global_tmp, size, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess)
{
//fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
finish1 = clock();
duration = (double)(finish1 - start1) / CLOCKS_PER_SEC;
//printf("performing do_finding_KNN_by_leaf_order_cuda time %fs\n", duration);
return 0;
}
|
79c251ebd5d5f9e1fca880286a90c9d8dbaf4039.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
const unsigned int filter_radius=16;
#define THREADS_PER_BLOCK 1024
hipError_t code;
#define CUDA_ERROR_CHECK(n) \
code = hipGetLastError(); \
if ( code != hipSuccess ) {\
printf("**** Error at num %d hipGetLastError().*********\n", n ); \
printf("Type of error: %s\n", hipGetErrorString( code )); \
}
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
__constant__ __device__ float d_Filter[FILTER_LENGTH];
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(
float *h_Dst,
float *h_Src,
float *h_Filter,
int imageW,
int imageH,
int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
/*
* GPU convolution Rows
*/
__global__ void convolutionRowGPU(
float *d_Dst,
float *d_Src,
/*float *d_Filter,*/
int imageW,
int imageH,
int filterR)
{
extern __shared__ float sh_Src[];
/* blockDim.x=32, blockDim.y=32
* blockIdx.x = 0-31, blockIdx.y = 0-31
* threadIdx.x = {0-31}
*/
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int index = row*imageW+col;
if (threadIdx.x == 31 && threadIdx.y == 31) {
//if (blockIdx.x == 1 || blockIdx.y == 1) {
//if (index ==32*32+0) {
printf("EKTUPWTHIKEEEEEE...\nindex=%d, blockDim.y=%d, blockIdx.y=%d, threadIDx.y=%d\n", index, blockDim.y, blockIdx.y, threadIdx.y);
printf("blockDim.x=%d, blockIdx.x=%d, threadIDx.x=%d\n", blockDim.x, blockIdx.x, threadIdx.x);
printf("gridDim.x=%d, gridDim.y=%d\n", gridDim.x, gridDim.y);
}
sh_Src[index] = d_Src[index];
__syncthreads();
float sum=0;
int x, d, k;
x = index;//threadIdx.x + blockDim.x * blockIdx.x;
for (k = -filterR; k <= filterR; k++) {
d = x%imageW + k;
if (d >= 0 && d < imageW) {
sum += sh_Src[x + k] * d_Filter[filterR - k];
}
d_Dst[x] = sum;
}
}
/*
* GPU convolution Columns
*/
__global__ void convolutionColumnGPU(
float *d_Dst,
float *d_Src,
/*float *d_Filter,*/
int imageW,
int imageH,
int filterR)
{
extern __shared__ float sh_Src[];
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int index = row*imageW+col;
sh_Src[index] = d_Src[index];
__syncthreads();
float sum=0;
int x, d, k;
x = index;//row*imageW+col
for (k = -filterR; k <= filterR; k++) {
d = x/imageW + k;
if (d >= 0 && d < imageH) {
sum += sh_Src[d * imageW + x%imageW] * d_Filter[filterR - k] ;
}
d_Dst[x] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU, *d_Input,
*d_Output_GPU, *d_Buffer /*,*d_Filter*/;
int pointsThatDiffer = 0;
int imageW;
int imageH;
unsigned int i;
// imageW and imageH are supplied by the user and are assumed to be equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
if (argc < 2) {
printf("Few arguments. Run as ./<name> <image_size>,where <image_size> should be a power of two and greater than 33\n");
return -1;
}
if ( strlen(argv[1]) == 0 ) {
printf("Error at argv[1]. Please give the size of image as 1st argument(e.g. ./exe 100 5\n");
return -1;
}
imageW = atoi(argv[1]);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
// It would also be a good idea to check the result of each malloc...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float) );
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
/// *** EDITED ***//
hipMalloc( (void **)&d_Input, imageW * imageH * sizeof(float) );
//hipMalloc( (void **)&d_Filter, FILTER_LENGTH * sizeof(float) );
hipMalloc( (void **)&d_Output_GPU, imageW * imageH * sizeof(float) );
hipMalloc( (void **)&d_Buffer, imageW * imageH * sizeof(float) );
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float));
if ( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL ||
h_OutputCPU==NULL || h_OutputGPU == NULL) {
printf("Error allocating host or device\n");
}
/*
* check whether any errors occurred
*/
// 'h_Filter' is the filter used for the convolution and is initialized randomly.
// 'h_Input' is the image on which the convolution is performed; it is also
// initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < (unsigned int)imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
hipMemcpy(d_Input,h_Input,imageW*imageH*sizeof(float),hipMemcpyHostToDevice);
CUDA_ERROR_CHECK(1);
code = hipMemcpyToSymbol(
d_Filter,
h_Filter,
FILTER_LENGTH*sizeof( float )
); if (code != hipSuccess) printf("Error copying from host Memory to Constant Memory!\n");
CUDA_ERROR_CHECK(2);
// The part below runs on the CPU and serves as the reference against which the GPU results are compared.
printf("CPU computation...\n");
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
// create 4x4 thread blocks
dim3 grid_size;
// configure a two dimensional grid as well
dim3 block_size;
if (imageH <= 32) {
grid_size.x = 1;
grid_size.y = 1;
block_size.x = imageH;
block_size.y = imageH;
}
else {
grid_size.x = 1+imageH/32;
grid_size.y = imageH/32;
block_size.x = 32;
block_size.y = 32;
}
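/*
 * Sketch (not part of the original hand-in): a common alternative is ceiling
 * division in both dimensions,
 *     grid_size.x = (imageW + 31) / 32;
 *     grid_size.y = (imageH + 31) / 32;
 * combined with a bounds check inside the kernels for threads that fall outside
 * the image, so no extra rows or columns are processed.
 */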
printf("grid size: %d\n", grid_size.x);
hipLaunchKernelGGL(( convolutionRowGPU), dim3(grid_size) , dim3(block_size), THREADS_PER_BLOCK*sizeof(float), 0, d_Buffer,
d_Input/*,d_Filter*/, imageH, imageW, filter_radius);
hipDeviceSynchronize();//barrier of host
CUDA_ERROR_CHECK(3);
hipLaunchKernelGGL(( convolutionColumnGPU), dim3(grid_size) , dim3(block_size), THREADS_PER_BLOCK*sizeof(float), 0, d_Output_GPU, d_Buffer,
/*d_Filter,*/ imageH, imageW, filter_radius);
hipDeviceSynchronize();//barrier of host
CUDA_ERROR_CHECK(4);
//return data to host by copying it from global memory to host memory
hipMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(float),hipMemcpyDeviceToHost);
CUDA_ERROR_CHECK(5);
//now compare host results VS device results. Is GPU same as CPU?!
for (i = 0; i < (unsigned int)imageW * imageH; i++) {
if(ABS(h_OutputCPU[i] - h_OutputGPU[i]) > accuracy){
pointsThatDiffer = 1;
printf("The difference between the %dnth element is larger than accuracy. \n CPU: %g GPU %g differece: %.15g \nNow exiting..\n", i,h_OutputCPU[i] ,h_OutputGPU[i], ABS(h_OutputGPU[i] - h_OutputCPU[i]) );
break;
}
}
if (pointsThatDiffer == 0)
printf("******************** Correct: GPU output is the same as CPU output *************\n");
else
printf("******************** Error: GPU output differs from CPU output!!! *************\n");
// free all the allocated memory
free(h_OutputCPU); hipFree(d_Output_GPU);
free(h_Buffer); hipFree(d_Buffer);
free(h_Input); hipFree(d_Input);
free(h_Filter); //hipFree(d_Filter);
// Do a device reset just in case... Remove the comment once you have implemented the CUDA part
hipDeviceReset();
CUDA_ERROR_CHECK(6);
return 0;
}
|
79c251ebd5d5f9e1fca880286a90c9d8dbaf4039.cu
|
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
const unsigned int filter_radius=16;
#define THREADS_PER_BLOCK 1024
cudaError_t code;
#define CUDA_ERROR_CHECK(n) \
code = cudaGetLastError(); \
if ( code != cudaSuccess ) {\
printf("**** Error at num %d cudaGetLastError().*********\n", n ); \
printf("Type of error: %s\n", cudaGetErrorString( code )); \
}
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
__constant__ __device__ float d_Filter[FILTER_LENGTH];
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(
float *h_Dst,
float *h_Src,
float *h_Filter,
int imageW,
int imageH,
int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
/*
* GPU convolution Rows
*/
__global__ void convolutionRowGPU(
float *d_Dst,
float *d_Src,
/*float *d_Filter,*/
int imageW,
int imageH,
int filterR)
{
extern __shared__ float sh_Src[];
/* blockDim.x=32, blockDim.y=32
* blockIdx.x = 0-31, blockIdx.y = 0-31
* threadIdx.x = {0-31}
*/
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int index = row*imageW+col;
if (threadIdx.x == 31 && threadIdx.y == 31) {
//if (blockIdx.x == 1 || blockIdx.y == 1) {
//if (index ==32*32+0) {
printf("EKTUPWTHIKEEEEEE...\nindex=%d, blockDim.y=%d, blockIdx.y=%d, threadIDx.y=%d\n", index, blockDim.y, blockIdx.y, threadIdx.y);
printf("blockDim.x=%d, blockIdx.x=%d, threadIDx.x=%d\n", blockDim.x, blockIdx.x, threadIdx.x);
printf("gridDim.x=%d, gridDim.y=%d\n", gridDim.x, gridDim.y);
}
sh_Src[index] = d_Src[index];
__syncthreads();
float sum=0;
int x, d, k;
x = index;//threadIdx.x + blockDim.x * blockIdx.x;
for (k = -filterR; k <= filterR; k++) {
d = x%imageW + k;
if (d >= 0 && d < imageW) {
sum += sh_Src[x + k] * d_Filter[filterR - k];
}
d_Dst[x] = sum;
}
}
/*
* GPU convolution Columns
*/
__global__ void convolutionColumnGPU(
float *d_Dst,
float *d_Src,
/*float *d_Filter,*/
int imageW,
int imageH,
int filterR)
{
extern __shared__ float sh_Src[];
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int index = row*imageW+col;
sh_Src[index] = d_Src[index];
__syncthreads();
float sum=0;
int x, d, k;
x = index;//row*imageW+col
for (k = -filterR; k <= filterR; k++) {
d = x/imageW + k;
if (d >= 0 && d < imageH) {
sum += sh_Src[d * imageW + x%imageW] * d_Filter[filterR - k] ;
}
d_Dst[x] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU, *d_Input,
*d_Output_GPU, *d_Buffer /*,*d_Filter*/;
int pointsThatDiffer = 0;
int imageW;
int imageH;
unsigned int i;
// imageW and imageH are supplied by the user and are assumed to be equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
if (argc < 2) {
printf("Few arguments. Run as ./<name> <image_size>,where <image_size> should be a power of two and greater than 33\n");
return -1;
}
if ( strlen(argv[1]) == 0 ) {
printf("Error at argv[1]. Please give the size of image as 1st argument(e.g. ./exe 100 5\n");
return -1;
}
imageW = atoi(argv[1]);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
// It would also be a good idea to check the result of each malloc...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float) );
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
/// *** EDITED ***//
cudaMalloc( (void **)&d_Input, imageW * imageH * sizeof(float) );
//cudaMalloc( (void **)&d_Filter, FILTER_LENGTH * sizeof(float) );
cudaMalloc( (void **)&d_Output_GPU, imageW * imageH * sizeof(float) );
cudaMalloc( (void **)&d_Buffer, imageW * imageH * sizeof(float) );
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float));
if ( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL ||
h_OutputCPU==NULL || h_OutputGPU == NULL) {
printf("Error allocating host or device\n");
}
/*
* check whether any errors occurred
*/
// 'h_Filter' is the filter used for the convolution and is initialized randomly.
// 'h_Input' is the image on which the convolution is performed; it is also
// initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < (unsigned int)imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
cudaMemcpy(d_Input,h_Input,imageW*imageH*sizeof(float),cudaMemcpyHostToDevice);
CUDA_ERROR_CHECK(1);
code = cudaMemcpyToSymbol(
d_Filter,
h_Filter,
FILTER_LENGTH*sizeof( float )
); if (code != cudaSuccess) printf("Error copying from host Memory to Constant Memory!\n");
CUDA_ERROR_CHECK(2);
// The part below runs on the CPU and serves as the reference against which the GPU results are compared.
printf("CPU computation...\n");
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
// create 4x4 thread blocks
dim3 grid_size;
// configure a two dimensional grid as well
dim3 block_size;
if (imageH <= 32) {
grid_size.x = 1;
grid_size.y = 1;
block_size.x = imageH;
block_size.y = imageH;
}
else {
grid_size.x = 1+imageH/32;
grid_size.y = imageH/32;
block_size.x = 32;
block_size.y = 32;
}
printf("grid size: %d\n", grid_size.x);
convolutionRowGPU<<<grid_size , block_size, THREADS_PER_BLOCK*sizeof(float)>>>(d_Buffer,
d_Input/*,d_Filter*/, imageH, imageW, filter_radius);
cudaThreadSynchronize();//barrier of host
CUDA_ERROR_CHECK(3);
convolutionColumnGPU<<<grid_size , block_size, THREADS_PER_BLOCK*sizeof(float)>>>(d_Output_GPU, d_Buffer,
/*d_Filter,*/ imageH, imageW, filter_radius);
cudaThreadSynchronize();//barrier of host
CUDA_ERROR_CHECK(4);
//return data to host by copying it from global memory to host memory
cudaMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(float),cudaMemcpyDeviceToHost);
CUDA_ERROR_CHECK(5);
//now compare host results VS device results. Is GPU same as CPU?!
for (i = 0; i < (unsigned int)imageW * imageH; i++) {
if(ABS(h_OutputCPU[i] - h_OutputGPU[i]) > accuracy){
pointsThatDiffer = 1;
printf("The difference between the %dnth element is larger than accuracy. \n CPU: %g GPU %g differece: %.15g \nNow exiting..\n", i,h_OutputCPU[i] ,h_OutputGPU[i], ABS(h_OutputGPU[i] - h_OutputCPU[i]) );
break;
}
}
if (pointsThatDiffer == 0)
printf("******************** Correct: GPU output is the same as CPU output *************\n");
else
printf("******************** Error: GPU output differs from CPU output!!! *************\n");
// free all the allocated memory
free(h_OutputCPU); cudaFree(d_Output_GPU);
free(h_Buffer); cudaFree(d_Buffer);
free(h_Input); cudaFree(d_Input);
free(h_Filter); //cudaFree(d_Filter);
// Do a device reset just in case... Remove the comment once you have implemented the CUDA part
cudaDeviceReset();
CUDA_ERROR_CHECK(6);
return 0;
}
|
32dee9428a13d4b2a0beb418d233577c43c398c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/reader/nvdecoder/imgproc.h"
#include <hip/hip_fp16.h>
namespace dali {
namespace {
// using math from https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx
template<typename T>
struct YCbCr {
T y, cb, cr;
};
// https://docs.microsoft.com/en-gb/windows/desktop/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888
__constant__ float ycbcr2rgb_mat_norm[9] = {
1.164383f, 0.0f, 1.596027f,
1.164383f, -0.391762f, -0.812968f,
1.164383f, 2.017232f, 0.0f
};
// not normalized need *255
__constant__ float ycbcr2rgb_mat[9] = {
1.164383f * 255.0f, 0.0f, 1.596027f * 255.0f,
1.164383f * 255.0f, -0.391762f * 255.0f, -0.812968f * 255.0f,
1.164383f * 255.0f, 2.017232f * 255.0f, 0.0f
};
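// Written out, together with the offsets applied in ycbcr2rgb() below, these
// coefficients implement (approximately)
//   R = 1.164*(Y - 16/255) + 1.596*(Cr - 128/255)
//   G = 1.164*(Y - 16/255) - 0.392*(Cb - 128/255) - 0.813*(Cr - 128/255)
//   B = 1.164*(Y - 16/255) + 2.017*(Cb - 128/255)
// with the normalized variant clamped to [0, 1] and the other scaled by 255.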
__device__ float clip(float x, float max) {
return fminf(fmaxf(x, 0.0f), max);
}
template<typename T>
__device__ T convert(const float x) {
return static_cast<T>(x);
}
#if 0
template<>
__device__ half convert<half>(const float x) {
return __float2half(x);
}
template<>
__device__ uint8_t convert<uint8_t>(const float x) {
return static_cast<uint8_t>(roundf(x));
}
#endif
template<typename YCbCr_T, typename RGB_T, bool Normalized = false>
__device__ void ycbcr2rgb(const YCbCr<YCbCr_T>& ycbcr, RGB_T* rgb,
size_t stride) {
auto y = (static_cast<float>(ycbcr.y) - 16.0f/255.0f);
auto cb = (static_cast<float>(ycbcr.cb) - 128.0f/255.0f);
auto cr = (static_cast<float>(ycbcr.cr) - 128.0f/255.0f);
float r, g, b;
if (Normalized) {
auto& m = ycbcr2rgb_mat_norm;
r = clip(y*m[0] + cb*m[1] + cr*m[2], 1.0f);
g = clip(y*m[3] + cb*m[4] + cr*m[5], 1.0f);
b = clip(y*m[6] + cb*m[7] + cr*m[8], 1.0f);
} else {
auto& m = ycbcr2rgb_mat;
r = clip(y*m[0] + cb*m[1] + cr*m[2], 255.0f);
g = clip(y*m[3] + cb*m[4] + cr*m[5], 255.0f);
b = clip(y*m[6] + cb*m[7] + cr*m[8], 255.0f);
}
rgb[0] = convert<RGB_T>(r);
rgb[stride] = convert<RGB_T>(g);
rgb[stride*2] = convert<RGB_T>(b);
}
template<typename T, bool Normalized = false, bool RGB = true>
__global__ void process_frame_kernel(
hipTextureObject_t luma, hipTextureObject_t chroma,
T* dst, int index,
float fx, float fy,
int dst_width, int dst_height, int c) {
const int dst_x = blockIdx.x * blockDim.x + threadIdx.x;
const int dst_y = blockIdx.y * blockDim.y + threadIdx.y;
if (dst_x >= dst_width || dst_y >= dst_height)
return;
auto src_x = 0.0f;
// TODO(spanev) something less hacky here, why 4:2:0 fails on this edge?
float shift = (dst_x == dst_width - 1) ? 0 : 0.5f;
src_x = static_cast<float>(dst_x) * fx + shift;
auto src_y = static_cast<float>(dst_y) * fy + shift;
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#tex2d-object
YCbCr<float> ycbcr;
ycbcr.y = tex2D<float>(luma, src_x, src_y);
auto cbcr = tex2D<float2>(chroma, src_x * 0.5f, src_y * 0.5f);
ycbcr.cb = cbcr.x;
ycbcr.cr = cbcr.y;
auto* out = &dst[(dst_x + dst_y * dst_width) * c];
constexpr size_t stride = 1;
if (RGB) {
ycbcr2rgb<float, T, Normalized>(ycbcr, out, stride);
} else {
constexpr float scaling = Normalized ? 1.0f : 255.0f;
out[0] = convert<T>(ycbcr.y * scaling);
out[stride] = convert<T>(ycbcr.cb * scaling);
out[stride*2] = convert<T>(ycbcr.cr * scaling);
}
}
inline constexpr int divUp(int total, int grain) {
return (total + grain - 1) / grain;
}
} // namespace
template<typename T>
void process_frame(
hipTextureObject_t chroma, hipTextureObject_t luma,
SequenceWrapper& output, int index, hipStream_t stream,
uint16_t input_width, uint16_t input_height,
bool rgb, bool normalized) {
auto scale_width = input_width;
auto scale_height = input_height;
auto fx = static_cast<float>(input_width) / scale_width;
auto fy = static_cast<float>(input_height) / scale_height;
dim3 block(32, 8);
dim3 grid(divUp(output.width, block.x), divUp(output.height, block.y));
int frame_stride = index * output.height * output.width * output.channels;
LOG_LINE << "Processing frame " << index
<< " (frame_stride=" << frame_stride << ")" << std::endl;
auto* tensor_out = output.sequence.mutable_data<T>() + frame_stride;
if (normalized) {
if (rgb) {
hipLaunchKernelGGL(( process_frame_kernel<T, true, true>), dim3(grid), dim3(block), 0, stream,
luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
} else {
hipLaunchKernelGGL(( process_frame_kernel<T, true, false>), dim3(grid), dim3(block), 0, stream,
luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
}
} else {
if (rgb) {
hipLaunchKernelGGL(( process_frame_kernel<T, false, true>), dim3(grid), dim3(block), 0, stream,
luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
} else {
hipLaunchKernelGGL(( process_frame_kernel<T, false, false>), dim3(grid), dim3(block), 0, stream,
luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
}
}
}
template
void process_frame<float>(
hipTextureObject_t chroma, hipTextureObject_t luma,
SequenceWrapper& output, int index, hipStream_t stream,
uint16_t input_width, uint16_t input_height,
bool rgb, bool normalized);
template
void process_frame<uint8_t>(
hipTextureObject_t chroma, hipTextureObject_t luma,
SequenceWrapper& output, int index, hipStream_t stream,
uint16_t input_width, uint16_t input_height,
bool rgb, bool normalized);
} // namespace dali
|
32dee9428a13d4b2a0beb418d233577c43c398c1.cu
|
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/reader/nvdecoder/imgproc.h"
#include <cuda_fp16.h>
namespace dali {
namespace {
// using math from https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx
template<typename T>
struct YCbCr {
T y, cb, cr;
};
// https://docs.microsoft.com/en-gb/windows/desktop/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888
__constant__ float ycbcr2rgb_mat_norm[9] = {
1.164383f, 0.0f, 1.596027f,
1.164383f, -0.391762f, -0.812968f,
1.164383f, 2.017232f, 0.0f
};
// not normalized need *255
__constant__ float ycbcr2rgb_mat[9] = {
1.164383f * 255.0f, 0.0f, 1.596027f * 255.0f,
1.164383f * 255.0f, -0.391762f * 255.0f, -0.812968f * 255.0f,
1.164383f * 255.0f, 2.017232f * 255.0f, 0.0f
};
__device__ float clip(float x, float max) {
return fminf(fmaxf(x, 0.0f), max);
}
template<typename T>
__device__ T convert(const float x) {
return static_cast<T>(x);
}
#if 0
template<>
__device__ half convert<half>(const float x) {
return __float2half(x);
}
template<>
__device__ uint8_t convert<uint8_t>(const float x) {
return static_cast<uint8_t>(roundf(x));
}
#endif
template<typename YCbCr_T, typename RGB_T, bool Normalized = false>
__device__ void ycbcr2rgb(const YCbCr<YCbCr_T>& ycbcr, RGB_T* rgb,
size_t stride) {
auto y = (static_cast<float>(ycbcr.y) - 16.0f/255.0f);
auto cb = (static_cast<float>(ycbcr.cb) - 128.0f/255.0f);
auto cr = (static_cast<float>(ycbcr.cr) - 128.0f/255.0f);
float r, g, b;
if (Normalized) {
auto& m = ycbcr2rgb_mat_norm;
r = clip(y*m[0] + cb*m[1] + cr*m[2], 1.0f);
g = clip(y*m[3] + cb*m[4] + cr*m[5], 1.0f);
b = clip(y*m[6] + cb*m[7] + cr*m[8], 1.0f);
} else {
auto& m = ycbcr2rgb_mat;
r = clip(y*m[0] + cb*m[1] + cr*m[2], 255.0f);
g = clip(y*m[3] + cb*m[4] + cr*m[5], 255.0f);
b = clip(y*m[6] + cb*m[7] + cr*m[8], 255.0f);
}
rgb[0] = convert<RGB_T>(r);
rgb[stride] = convert<RGB_T>(g);
rgb[stride*2] = convert<RGB_T>(b);
}
template<typename T, bool Normalized = false, bool RGB = true>
__global__ void process_frame_kernel(
cudaTextureObject_t luma, cudaTextureObject_t chroma,
T* dst, int index,
float fx, float fy,
int dst_width, int dst_height, int c) {
const int dst_x = blockIdx.x * blockDim.x + threadIdx.x;
const int dst_y = blockIdx.y * blockDim.y + threadIdx.y;
if (dst_x >= dst_width || dst_y >= dst_height)
return;
auto src_x = 0.0f;
// TODO(spanev) something less hacky here, why 4:2:0 fails on this edge?
float shift = (dst_x == dst_width - 1) ? 0 : 0.5f;
src_x = static_cast<float>(dst_x) * fx + shift;
auto src_y = static_cast<float>(dst_y) * fy + shift;
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#tex2d-object
YCbCr<float> ycbcr;
ycbcr.y = tex2D<float>(luma, src_x, src_y);
auto cbcr = tex2D<float2>(chroma, src_x * 0.5f, src_y * 0.5f);
ycbcr.cb = cbcr.x;
ycbcr.cr = cbcr.y;
auto* out = &dst[(dst_x + dst_y * dst_width) * c];
constexpr size_t stride = 1;
if (RGB) {
ycbcr2rgb<float, T, Normalized>(ycbcr, out, stride);
} else {
constexpr float scaling = Normalized ? 1.0f : 255.0f;
out[0] = convert<T>(ycbcr.y * scaling);
out[stride] = convert<T>(ycbcr.cb * scaling);
out[stride*2] = convert<T>(ycbcr.cr * scaling);
}
}
inline constexpr int divUp(int total, int grain) {
return (total + grain - 1) / grain;
}
} // namespace
template<typename T>
void process_frame(
cudaTextureObject_t chroma, cudaTextureObject_t luma,
SequenceWrapper& output, int index, cudaStream_t stream,
uint16_t input_width, uint16_t input_height,
bool rgb, bool normalized) {
auto scale_width = input_width;
auto scale_height = input_height;
auto fx = static_cast<float>(input_width) / scale_width;
auto fy = static_cast<float>(input_height) / scale_height;
dim3 block(32, 8);
dim3 grid(divUp(output.width, block.x), divUp(output.height, block.y));
int frame_stride = index * output.height * output.width * output.channels;
LOG_LINE << "Processing frame " << index
<< " (frame_stride=" << frame_stride << ")" << std::endl;
auto* tensor_out = output.sequence.mutable_data<T>() + frame_stride;
if (normalized) {
if (rgb) {
process_frame_kernel<T, true, true><<<grid, block, 0, stream>>>
(luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
} else {
process_frame_kernel<T, true, false><<<grid, block, 0, stream>>>
(luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
}
} else {
if (rgb) {
process_frame_kernel<T, false, true><<<grid, block, 0, stream>>>
(luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
} else {
process_frame_kernel<T, false, false><<<grid, block, 0, stream>>>
(luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels);
}
}
}
template
void process_frame<float>(
cudaTextureObject_t chroma, cudaTextureObject_t luma,
SequenceWrapper& output, int index, cudaStream_t stream,
uint16_t input_width, uint16_t input_height,
bool rgb, bool normalized);
template
void process_frame<uint8_t>(
cudaTextureObject_t chroma, cudaTextureObject_t luma,
SequenceWrapper& output, int index, cudaStream_t stream,
uint16_t input_width, uint16_t input_height,
bool rgb, bool normalized);
} // namespace dali
|
fab7eef16c13f1b70d0b921bc6233f042403c45c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <vector>
#include <random>
#include <chrono>
#include <tuple>
#include <utility>
#include <numeric>
#include <iomanip>
#include "../../shared/include/utility.h"
// Declare a GPU-visible floating point variable in global memory.
__device__ float dResult;
/*
The most basic reduction kernel uses atomic operations to accumulate
the individual inputs in a single, device-wide visible variable.
If you have experience with atomics, it is important to note that the
basic atomicXXX instructions of CUDA have RELAXED semantics (scary!).
That means, the threads that operate atomically on them only agree that
there is a particular order for the accesses to that variable and nothing
else (especially no acquire/release semantics).
*/
__global__ void reduceAtomicGlobal(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
/*
Since all blocks must have the same number of threads,
we may have to launch more threads than there are
inputs. Superfluous threads should not try to read
from the input (out of bounds access!)
*/
if (id < N)
atomicAdd(&dResult, input[id]);
}
/*
First improvement: shared memory is much faster than global
memory. Each block can accumulate partial results in isolated
block-wide visible memory. This relieves the contention on
a single global variable that all threads want access to.
*/
__global__ void reduceAtomicShared(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
// Declare a shared float for each block
__shared__ float x;
// Only one thread should initialize this shared value
if (threadIdx.x == 0)
x = 0.0f;
/*
Before we continue, we must ensure that all threads
can see this update (initialization) by thread 0
*/
__syncthreads();
/*
Every thread in the block adds its input to the
shared variable of the block.
*/
if (id < N)
atomicAdd(&x, input[id]);
// Wait until all threads have done their part
__syncthreads();
/*
Once they are all done, only one thread must add
the block's partial result to the global variable.
*/
if (threadIdx.x == 0)
atomicAdd(&dResult, x);
}
/*
Second improvement: choosing a more suitable algorithm.
We can exploit the fact that the GPU is massively parallel
and come up with a fitting procedure that uses multiple
iterations. In each iteration, threads accumulate partial
results from the previous iteration. Before, the contended
accesses to one location forced the GPU to perform updates
sequentially O(N). Now, each thread can access its own,
exclusive shared variable in each iteration in parallel,
giving an effective runtime that is closer to O(log N).
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceShared(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
/*
Use a larger shared memory region so that each
thread can store its own partial results
*/
__shared__ float data[BLOCK_SIZE];
/*
Use a new strategy to handle superfluous threads.
To make sure they stay alive and can help with
the reduction, threads without an input simply
produce a '0', which has no effect on the result.
*/
data[threadIdx.x] = (id < N ? input[id] : 0);
/*
log N iterations to complete. In each step, a thread
accumulates two partial values to form the input for
the next iteration. The sum of all partial results
eventually yields the full result of the reduction.
*/
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
/*
In each iteration, we must make sure that all
threads are done writing the updates of the
previous iteration / the initialization.
*/
__syncthreads();
if (threadIdx.x < s)
data[threadIdx.x] += data[threadIdx.x + s];
}
/*
Note: thread 0 is the last thread to combine two
partial results, and the one who writes to global
memory, therefore no synchronization is required
after the last iteration.
*/
if (threadIdx.x == 0)
atomicAdd(&dResult, data[0]);
}
/*
Warp-level improvement: using warp-level primitives to
accelerate the final steps of the reduction. Warps
have a fast lane for communication. They are free
to exchange values in registers when they are being
scheduled for execution. Warps will be formed from
consecutive threads in groups of 32.
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceShuffle(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float data[BLOCK_SIZE];
data[threadIdx.x] = (id < N ? input[id] : 0);
// Only use shared memory until last 32 values
for (int s = blockDim.x / 2; s > 16; s /= 2)
{
__syncthreads();
if (threadIdx.x < s)
data[threadIdx.x] += data[threadIdx.x + s];
}
// The last 32 values can be handled with warp-level primitives
float x = data[threadIdx.x];
if (threadIdx.x < 32)
{
/*
The threads in the first warp shuffle their registers.
This replaces the last 5 iterations of the previous solution.
The mask indicates which threads participate in the shuffle.
The value indicates which register should be shuffled.
The final parameter gives the source thread from which the
current one should receive the shuffled value. Accesses that
are out of range (>= 32) will wrap around, but are not needed
(they will not affect the final result written by thread 0).
In each shuffle, at least half of the threads only participate
so they can provide useful data from the previous shuffle for
lower threads. To keep the code short, we always let all threads
participate, because it is an error to let threads reach a shuffle
instruction that they don't participate in.
*/
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 16);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 8);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 4);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 2);
x += __shfl_sync(0xFFFFFFFF, x, 1);
}
if (threadIdx.x == 0)
atomicAdd(&dResult, x);
}
/*
Final improvement: half of our threads actually idle after
they have loaded data from global memory to shared! Better
to have threads fetch two values at the start and then let
them all do at least some meaningful work. This means that
compared to all other methods, only half the number of
threads must be launched in the grid!
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceFinal(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float data[BLOCK_SIZE];
// Already combine two values upon load from global memory
data[threadIdx.x] = (id < N ? (input[id] + input[id+N/2]) : 0);
for (int s = blockDim.x / 2; s > 16; s /= 2)
{
__syncthreads();
if (threadIdx.x < s)
data[threadIdx.x] += data[threadIdx.x + s];
}
float x = data[threadIdx.x];
if (threadIdx.x < 32)
{
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 16);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 8);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 4);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 2);
x += __shfl_sync(0xFFFFFFFF, x, 1);
}
if (threadIdx.x == 0)
atomicAdd(&dResult, x);
}
int main()
{
std::cout << "==== Sample 08 - Reductions ====\n" << std::endl;
/*
Expected output: Accumulated results from CPU and GPU that
approach 42 * NUM_ITEMS (can vary greatly due to floating point
precision limitations).
With more sophisticated techniques, reported performance of the
GPU versions (measured runtime in ms) should generally decrease.
*/
constexpr unsigned int BLOCK_SIZE = 256;
constexpr unsigned int WARMUP_ITERATIONS = 10;
constexpr unsigned int TIMING_ITERATIONS = 20;
constexpr unsigned int N = 100'000'000;
std::cout << "Producing random inputs...\n" << std::endl;
// Generate some random numbers to reduce
std::vector<float> vals;
float* dValsPtr;
samplesutil::prepareRandomNumbersCPUGPU(N, vals, &dValsPtr);
std::cout << "==== CPU Reduction ====\n" << std::endl;
// A reference value is computed by sequential reduction
std::cout << "Computed CPU value: " << std::accumulate(vals.cbegin(), vals.cend(), 0.0f) << std::endl;
std::cout << "==== GPU Reductions ====\n" << std::endl;
/*
Set up a collection of reductions to evaluate for performance.
Each entry gives a technique's name, the kernel to call, and
the number of threads required for each individual technique.
*/
const std::tuple<const char*, void(*)(const float*, int), unsigned int> reductionTechniques[]
{
{"Atomic Global", reduceAtomicGlobal, N},
{"Atomic Shared", reduceAtomicShared, N},
{"Reduce Shared", reduceShared<BLOCK_SIZE>, N},
{"Reduce Shuffle", reduceShuffle<BLOCK_SIZE>, N},
{"Reduce Final", reduceFinal<BLOCK_SIZE>, N / 2 + 1}
};
// Evaluate each technique separately
for (const auto& [name, func, numThreads] : reductionTechniques)
{
// Compute the smallest grid to start required threads with a given block size
const dim3 blockDim = { BLOCK_SIZE, 1, 1 };
const dim3 gridDim = { (numThreads + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1 };
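/*
    For example, with BLOCK_SIZE = 256 and N = 100'000'000 this yields
    gridDim.x = 390'625 blocks for the first four techniques and 195'313
    blocks for "Reduce Final", which only asks for N / 2 + 1 threads.
    (Illustrative numbers, not printed by the sample.)
*/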
// Run several reductions for GPU to warm up
for (int i = 0; i < WARMUP_ITERATIONS; i++)
hipLaunchKernelGGL(( func), dim3(gridDim), dim3(blockDim), 0, 0, dValsPtr, N);
// Synchronize to ensure CPU only records time after warmup is done
hipDeviceSynchronize();
const auto before = std::chrono::system_clock::now();
float result = 0.0f;
// Run several iterations to get an average measurement
for (int i = 0; i < TIMING_ITERATIONS; i++)
{
// Reset accumulated result to 0 in each run
hipMemcpyToSymbol(dResult, &result, sizeof(float));
hipLaunchKernelGGL(( func), dim3(gridDim), dim3(blockDim), 0, 0, dValsPtr, N);
}
// hipMemcpyFromSymbol will implicitly synchronize CPU and GPU
hipMemcpyFromSymbol(&result, dResult, sizeof(float));
// Can measure time without an extra synchronization
const auto after = std::chrono::system_clock::now();
const auto elapsed = 1000.f * std::chrono::duration_cast<std::chrono::duration<float>>(after - before).count();
std::cout << std::setw(20) << name << "\t" << elapsed / TIMING_ITERATIONS << "ms \t" << result << std::endl;
}
// Free the allocated memory for input
hipFree(dValsPtr);
return 0;
}
/*
Exercises:
1) Do you have any other ideas how the reduction could be improved?
Making it even faster should be quite challenging, but if you have
some suggestions, try them out and see how they affect performance!
*/
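/*
    One possible direction for the exercise above (an illustrative sketch added
    here, not part of the original sample and not benchmarked): let every thread
    accumulate many inputs with a grid-stride loop before the shared-memory and
    shuffle phases, so the kernel can be launched with a small, fixed grid and
    far fewer atomics hit dResult. The kernel name reduceGridStride is made up
    for this sketch.
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceGridStride(const float* __restrict input, int N)
{
    __shared__ float data[BLOCK_SIZE];
    // Each thread first sums a strided subset of the input in a register.
    float x = 0.0f;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += blockDim.x * gridDim.x)
        x += input[i];
    data[threadIdx.x] = x;
    // Block-level tree reduction down to 32 partial results, as in reduceShuffle.
    for (int s = blockDim.x / 2; s > 16; s /= 2)
    {
        __syncthreads();
        if (threadIdx.x < s)
            data[threadIdx.x] += data[threadIdx.x + s];
    }
    x = data[threadIdx.x];
    // Final warp-level combination with shuffles, mirroring reduceShuffle.
    if (threadIdx.x < 32)
    {
        x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 16);
        x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 8);
        x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 4);
        x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 2);
        x += __shfl_sync(0xFFFFFFFF, x, 1);
    }
    if (threadIdx.x == 0)
        atomicAdd(&dResult, x);
}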
|
fab7eef16c13f1b70d0b921bc6233f042403c45c.cu
|
#include <cuda_runtime_api.h>
#include <iostream>
#include <vector>
#include <random>
#include <chrono>
#include <tuple>
#include <utility>
#include <numeric>
#include <iomanip>
#include "../../shared/include/utility.h"
// Declare a GPU-visible floating point variable in global memory.
__device__ float dResult;
/*
The most basic reduction kernel uses atomic operations to accumulate
the individual inputs in a single, device-wide visible variable.
If you have experience with atomics, it is important to note that the
basic atomicXXX instructions of CUDA have RELAXED semantics (scary!).
That means, the threads that operate atomically on them only agree that
there is a particular order for the accesses to that variable and nothing
else (especially no acquire/release semantics).
*/
__global__ void reduceAtomicGlobal(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
/*
Since all blocks must have the same number of threads,
we may have to launch more threads than there are
inputs. Superfluous threads should not try to read
from the input (out of bounds access!)
*/
if (id < N)
atomicAdd(&dResult, input[id]);
}
/*
First improvement: shared memory is much faster than global
memory. Each block can accumulate partial results in isolated
block-wide visible memory. This relieves the contention on
a single global variable that all threads want access to.
*/
__global__ void reduceAtomicShared(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
// Declare a shared float for each block
__shared__ float x;
// Only one thread should initialize this shared value
if (threadIdx.x == 0)
x = 0.0f;
/*
Before we continue, we must ensure that all threads
can see this update (initialization) by thread 0
*/
__syncthreads();
/*
Every thread in the block adds its input to the
shared variable of the block.
*/
if (id < N)
atomicAdd(&x, input[id]);
// Wait until all threads have done their part
__syncthreads();
/*
Once they are all done, only one thread must add
the block's partial result to the global variable.
*/
if (threadIdx.x == 0)
atomicAdd(&dResult, x);
}
/*
Second improvement: choosing a more suitable algorithm.
We can exploit the fact that the GPU is massively parallel
and come up with a fitting procedure that uses multiple
iterations. In each iteration, threads accumulate partial
results from the previous iteration. Before, the contended
accesses to one location forced the GPU to perform updates
sequentially O(N). Now, each thread can access its own,
exclusive shared variable in each iteration in parallel,
giving an effective runtime that is closer to O(log N).
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceShared(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
/*
Use a larger shared memory region so that each
thread can store its own partial results
*/
__shared__ float data[BLOCK_SIZE];
/*
Use a new strategy to handle superfluous threads.
To make sure they stay alive and can help with
the reduction, threads without an input simply
produce a '0', which has no effect on the result.
*/
data[threadIdx.x] = (id < N ? input[id] : 0);
/*
log N iterations to complete. In each step, a thread
accumulates two partial values to form the input for
the next iteration. The sum of all partial results
eventually yields the full result of the reduction.
*/
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
/*
In each iteration, we must make sure that all
threads are done writing the updates of the
previous iteration / the initialization.
*/
__syncthreads();
if (threadIdx.x < s)
data[threadIdx.x] += data[threadIdx.x + s];
}
/*
Note: thread 0 is the last thread to combine two
partial results, and the one who writes to global
memory, therefore no synchronization is required
after the last iteration.
*/
if (threadIdx.x == 0)
atomicAdd(&dResult, data[0]);
}
/*
Warp-level improvement: using warp-level primitives to
accelerate the final steps of the reduction. Warps
have a fast lane for communication. They are free
to exchange values in registers when they are being
scheduled for execution. Warps will be formed from
consecutive threads in groups of 32.
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceShuffle(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float data[BLOCK_SIZE];
data[threadIdx.x] = (id < N ? input[id] : 0);
// Only use shared memory until last 32 values
for (int s = blockDim.x / 2; s > 16; s /= 2)
{
__syncthreads();
if (threadIdx.x < s)
data[threadIdx.x] += data[threadIdx.x + s];
}
// The last 32 values can be handled with warp-level primitives
float x = data[threadIdx.x];
if (threadIdx.x < 32)
{
/*
The threads in the first warp shuffle their registers.
This replaces the last 5 iterations of the previous solution.
The mask indicates which threads participate in the shuffle.
The value indicates which register should be shuffled.
The final parameter gives the source thread from which the
current one should receive the shuffled value. Accesses that
are out of range (>= 32) will wrap around, but are not needed
(they will not affect the final result written by thread 0).
In each shuffle, at least half of the threads only participate
so they can provide useful data from the previous shuffle for
lower threads. To keep the code short, we always let all threads
participate, because it is an error to let threads reach a shuffle
instruction that they don't participate in.
*/
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 16);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 8);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 4);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 2);
x += __shfl_sync(0xFFFFFFFF, x, 1);
}
if (threadIdx.x == 0)
atomicAdd(&dResult, x);
}
/*
Final improvement: half of our threads actually idle after
they have loaded data from global memory to shared! Better
to have threads fetch two values at the start and then let
them all do at least some meaningful work. This means that
compared to all other methods, only half the number of
threads must be launched in the grid!
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceFinal(const float* __restrict input, int N)
{
const int id = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float data[BLOCK_SIZE];
// Already combine two values upon load from global memory
data[threadIdx.x] = (id < N ? (input[id] + input[id+N/2]) : 0);
for (int s = blockDim.x / 2; s > 16; s /= 2)
{
__syncthreads();
if (threadIdx.x < s)
data[threadIdx.x] += data[threadIdx.x + s];
}
float x = data[threadIdx.x];
if (threadIdx.x < 32)
{
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 16);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 8);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 4);
x += __shfl_sync(0xFFFFFFFF, x, threadIdx.x + 2);
x += __shfl_sync(0xFFFFFFFF, x, 1);
}
if (threadIdx.x == 0)
atomicAdd(&dResult, x);
}
int main()
{
std::cout << "==== Sample 08 - Reductions ====\n" << std::endl;
/*
Expected output: Accumulated results from CPU and GPU that
approach 42 * NUM_ITEMS (can vary greatly due to floating point
precision limitations).
With more sophisticated techniques, reported performance of the
GPU versions (measured runtime in ms) should generally decrease.
*/
constexpr unsigned int BLOCK_SIZE = 256;
constexpr unsigned int WARMUP_ITERATIONS = 10;
constexpr unsigned int TIMING_ITERATIONS = 20;
constexpr unsigned int N = 100'000'000;
std::cout << "Producing random inputs...\n" << std::endl;
// Generate some random numbers to reduce
std::vector<float> vals;
float* dValsPtr;
samplesutil::prepareRandomNumbersCPUGPU(N, vals, &dValsPtr);
std::cout << "==== CPU Reduction ====\n" << std::endl;
// A reference value is computed by sequential reduction
std::cout << "Computed CPU value: " << std::accumulate(vals.cbegin(), vals.cend(), 0.0f) << std::endl;
std::cout << "==== GPU Reductions ====\n" << std::endl;
/*
Set up a collection of reductions to evaluate for performance.
Each entry gives a technique's name, the kernel to call, and
the number of threads required for each individual technique.
*/
const std::tuple<const char*, void(*)(const float*, int), unsigned int> reductionTechniques[]
{
{"Atomic Global", reduceAtomicGlobal, N},
{"Atomic Shared", reduceAtomicShared, N},
{"Reduce Shared", reduceShared<BLOCK_SIZE>, N},
{"Reduce Shuffle", reduceShuffle<BLOCK_SIZE>, N},
{"Reduce Final", reduceFinal<BLOCK_SIZE>, N / 2 + 1}
};
// Evaluate each technique separately
for (const auto& [name, func, numThreads] : reductionTechniques)
{
// Compute the smallest grid to start required threads with a given block size
const dim3 blockDim = { BLOCK_SIZE, 1, 1 };
const dim3 gridDim = { (numThreads + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1 };
// Run several reductions for GPU to warm up
for (int i = 0; i < WARMUP_ITERATIONS; i++)
func<<<gridDim, blockDim>>>(dValsPtr, N);
// Synchronize to ensure CPU only records time after warmup is done
cudaDeviceSynchronize();
const auto before = std::chrono::system_clock::now();
float result = 0.0f;
// Run several iterations to get an average measurement
for (int i = 0; i < TIMING_ITERATIONS; i++)
{
// Reset accumulated result to 0 in each run
cudaMemcpyToSymbol(dResult, &result, sizeof(float));
func<<<gridDim, blockDim>>>(dValsPtr, N);
}
// cudaMemcpyFromSymbol will implicitly synchronize CPU and GPU
cudaMemcpyFromSymbol(&result, dResult, sizeof(float));
// Can measure time without an extra synchronization
const auto after = std::chrono::system_clock::now();
const auto elapsed = 1000.f * std::chrono::duration_cast<std::chrono::duration<float>>(after - before).count();
std::cout << std::setw(20) << name << "\t" << elapsed / TIMING_ITERATIONS << "ms \t" << result << std::endl;
}
// Free the allocated memory for input
cudaFree(dValsPtr);
return 0;
}
/*
Exercises:
1) Do you have any other ideas how the reduction could be improved?
Making it even faster should be quite challenging, but if you have
some suggestions, try them out and see how they affect performance!
*/
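/*
    One possible direction for exercise 1 (a sketch only, not benchmarked here):
    let each thread accumulate many elements with a grid-stride loop before the
    block-level reduction, so a small, fixed grid can handle arbitrary N and the
    number of global atomics stays at one per block. The kernel below reuses the
    dResult symbol and BLOCK_SIZE convention from above; treat it as an
    illustration rather than as part of the measured samples.
*/
template <unsigned int BLOCK_SIZE>
__global__ void reduceGridStride(const float* __restrict input, int N)
{
    __shared__ float data[BLOCK_SIZE];
    // Each thread first sums a strided subset of the input.
    float sum = 0.0f;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += blockDim.x * gridDim.x)
        sum += input[i];
    data[threadIdx.x] = sum;
    // Standard shared-memory tree reduction over the block.
    for (int s = blockDim.x / 2; s > 0; s /= 2)
    {
        __syncthreads();
        if (threadIdx.x < s)
            data[threadIdx.x] += data[threadIdx.x + s];
    }
    if (threadIdx.x == 0)
        atomicAdd(&dResult, data[0]);
}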
|
b6804e620b0aabb76546e0e96f8ae0847f69d535.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* this file is based on https://github.com/niessner/BundleFusion.git
*/
#include "cuda_scene_rep.h"
#include <hip/hip_texture_types.h>
#include <pcl/Vertices.h>
#include <pcl/conversions.h>
#include "tables.h"
///////////////
// Host part //
///////////////
__device__ __host__
HashDataStruct::HashDataStruct() {
d_heap = NULL;
d_heapCounter = NULL;
d_hash = NULL;
d_hashDecision = NULL;
d_hashDecisionPrefix = NULL;
d_hashCompactified = NULL;
d_hashCompactifiedCounter = NULL;
d_SDFBlocks = NULL;
d_hashBucketMutex = NULL;
m_bIsOnGPU = false;
}
__host__
void HashDataStruct::allocate(const HashParams ¶ms, bool dataOnGPU) {
m_bIsOnGPU = dataOnGPU;
if (m_bIsOnGPU) {
cutilSafeCall(hipMalloc(&d_heap, sizeof(unsigned int) * params.m_numSDFBlocks));
cutilSafeCall(hipMalloc(&d_heapCounter, sizeof(unsigned int)));
cutilSafeCall(hipMalloc(&d_hash, sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(hipMalloc(&d_hashDecision, sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(
hipMalloc(&d_hashDecisionPrefix, sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(
hipMalloc(&d_hashCompactified, sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(hipMalloc(&d_hashCompactifiedCounter, sizeof(int)));
cutilSafeCall(hipMalloc(&d_SDFBlocks,
sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize * params.m_SDFBlockSize *
params.m_SDFBlockSize));
cutilSafeCall(hipMalloc(&d_hashBucketMutex, sizeof(int) * params.m_hashNumBuckets));
} else {
d_heap = new unsigned int[params.m_numSDFBlocks];
d_heapCounter = new unsigned int[1];
d_hash = new HashEntry[params.m_hashNumBuckets * params.m_hashBucketSize];
d_hashDecision = new int[params.m_hashNumBuckets * params.m_hashBucketSize];
d_hashDecisionPrefix = new int[params.m_hashNumBuckets * params.m_hashBucketSize];
d_hashCompactifiedCounter = new int[1];
d_hashCompactified = new HashEntry[params.m_hashNumBuckets * params.m_hashBucketSize];
d_SDFBlocks = new Voxel[params.m_numSDFBlocks * params.m_SDFBlockSize * params.m_SDFBlockSize *
params.m_SDFBlockSize];
d_hashBucketMutex = new int[params.m_hashNumBuckets];
}
updateParams(params);
}
extern "C" void updateConstantHashParams(const HashParams& params) {
size_t size;
CUDA_CHECKED_CALL(hipGetSymbolSize(&size, c_hashParams));
CUDA_CHECKED_CALL(hipMemcpyToSymbol(c_hashParams, ¶ms, size, 0, hipMemcpyHostToDevice));
CUDA_CHECKED_CALL(hipDeviceSynchronize());
}
__host__
void HashDataStruct::updateParams(const HashParams ¶ms) {
if (m_bIsOnGPU) {
updateConstantHashParams(params);
}
}
__host__
void HashDataStruct::free() {
if (m_bIsOnGPU) {
cutilSafeCall(hipFree(d_heap));
cutilSafeCall(hipFree(d_heapCounter));
cutilSafeCall(hipFree(d_hash));
cutilSafeCall(hipFree(d_hashDecision));
cutilSafeCall(hipFree(d_hashDecisionPrefix));
cutilSafeCall(hipFree(d_hashCompactified));
cutilSafeCall(hipFree(d_hashCompactifiedCounter));
cutilSafeCall(hipFree(d_SDFBlocks));
cutilSafeCall(hipFree(d_hashBucketMutex));
} else {
if (d_heap) delete[] d_heap;
if (d_heapCounter) delete[] d_heapCounter;
if (d_hash) delete[] d_hash;
if (d_hashDecision) delete[] d_hashDecision;
if (d_hashDecisionPrefix) delete[] d_hashDecisionPrefix;
if (d_hashCompactified) delete[] d_hashCompactified;
if (d_hashCompactifiedCounter) delete[] d_hashCompactifiedCounter;
if (d_SDFBlocks) delete[] d_SDFBlocks;
if (d_hashBucketMutex) delete[] d_hashBucketMutex;
}
d_hash = NULL;
d_heap = NULL;
d_heapCounter = NULL;
d_hashDecision = NULL;
d_hashDecisionPrefix = NULL;
d_hashCompactified = NULL;
d_hashCompactifiedCounter = NULL;
d_SDFBlocks = NULL;
d_hashBucketMutex = NULL;
}
__host__
HashDataStruct HashDataStruct::copyToCPU() const {
HashParams params;
HashDataStruct hashData;
hashData.allocate(params, false); //allocate the data on the CPU
cutilSafeCall(
hipMemcpy(hashData.d_heap, d_heap, sizeof(unsigned int) * params.m_numSDFBlocks, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hashData.d_heapCounter, d_heapCounter, sizeof(unsigned int), hipMemcpyDeviceToHost));
cutilSafeCall(
hipMemcpy(hashData.d_hash, d_hash, sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize,
hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hashData.d_hashDecision, d_hashDecision,
sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hashData.d_hashDecisionPrefix, d_hashDecisionPrefix,
sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hashData.d_hashCompactified, d_hashCompactified,
sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize,
hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hashData.d_SDFBlocks, d_SDFBlocks,
sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize * params.m_SDFBlockSize *
params.m_SDFBlockSize, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hashData.d_hashBucketMutex, d_hashBucketMutex, sizeof(int) * params.m_hashNumBuckets,
hipMemcpyDeviceToHost));
    return hashData; //TODO MATTHIAS look at this (i.e., when does memory get destroyed; if it's in the destructor it would kill everything here)
}
/////////////////
// Device part //
/////////////////
__device__
const HashParams &HashDataStruct::params() const {
return c_hashParams;
}
//! see teschner et al. (but with correct prime values)
__device__
uint HashDataStruct::computeHashPos(const int3 &virtualVoxelPos) const {
const int p0 = 73856093;
const int p1 = 19349669;
const int p2 = 83492791;
int res = ((virtualVoxelPos.x * p0) ^ (virtualVoxelPos.y * p1) ^ (virtualVoxelPos.z * p2)) %
c_hashParams.m_hashNumBuckets;
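    // Note: the XOR of the three products can be negative for negative voxel
    // coordinates, and the C++ '%' operator keeps the sign of its left operand,
    // hence the wrap-around correction below.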
if (res < 0) res += c_hashParams.m_hashNumBuckets;
return (uint) res;
}
//merges two voxels (v0 is the input voxel, v1 the currently stored voxel)
__device__
void HashDataStruct::combineVoxel(const Voxel &v0, const Voxel &v1, Voxel &out) const {
//v.color = (10*v0.weight * v0.color + v1.weight * v1.color)/(10*v0.weight + v1.weight); //give the currently observed color more weight
//v.color = (v0.weight * v0.color + v1.weight * v1.color)/(v0.weight + v1.weight);
//out.color = 0.5f * (v0.color + v1.color); //exponential running average
float3 c0 = make_float3(v0.color.x, v0.color.y, v0.color.z);
float3 c1 = make_float3(v1.color.x, v1.color.y, v1.color.z);
//float3 res = (c0+c1)/2;
//float3 res = (c0 * (float)v0.weight + c1 * (float)v1.weight) / ((float)v0.weight + (float)v1.weight);
//float3 res = c1;
if (v0.weight == 0) out.color = v1.color;
else {
float3 res = 0.5f * c0 + 0.5f * c1;
out.color.x = (uchar)(res.x + 0.5f);
out.color.y = (uchar)(res.y + 0.5f);
out.color.z = (uchar)(res.z + 0.5f);
}
out.sdf = (v0.sdf * (float) v0.weight + v1.sdf * (float) v1.weight) / ((float) v0.weight + (float) v1.weight);
//out.weight = min(c_hashParams.m_integrationWeightMax, (unsigned int)v0.weight + (unsigned int)v1.weight);
out.weight = min((float) c_hashParams.m_integrationWeightMax, v0.weight + v1.weight);
}
__device__
void HashDataStruct::combineVoxelDepthOnly(const Voxel &v0, const Voxel &v1, Voxel &out) const {
out.sdf = (v0.sdf * (float) v0.weight + v1.sdf * (float) v1.weight) / ((float) v0.weight + (float) v1.weight);
out.weight = min((float) c_hashParams.m_integrationWeightMax, v0.weight + v1.weight);
}
//! returns the truncation of the SDF for a given distance value
__device__
float HashDataStruct::getTruncation(float z) const {
return c_hashParams.m_truncation + c_hashParams.m_truncScale * z;
}
__device__
float3 HashDataStruct::worldToVirtualVoxelPosFloat(const float3 &pos) const {
return pos / c_hashParams.m_virtualVoxelSize;
}
__device__
int3 HashDataStruct::worldToVirtualVoxelPos(const float3 &pos) const {
//const float3 p = pos*g_VirtualVoxelResolutionScalar;
const float3 p = pos / c_hashParams.m_virtualVoxelSize;
return make_int3(p + make_float3(sign(p)) * 0.5f);
}
__device__
int3 HashDataStruct::virtualVoxelPosToSDFBlock(int3 virtualVoxelPos) const {
if (virtualVoxelPos.x < 0) virtualVoxelPos.x -= SDF_BLOCK_SIZE - 1;
if (virtualVoxelPos.y < 0) virtualVoxelPos.y -= SDF_BLOCK_SIZE - 1;
if (virtualVoxelPos.z < 0) virtualVoxelPos.z -= SDF_BLOCK_SIZE - 1;
return make_int3(
virtualVoxelPos.x / SDF_BLOCK_SIZE,
virtualVoxelPos.y / SDF_BLOCK_SIZE,
virtualVoxelPos.z / SDF_BLOCK_SIZE);
}
// Computes virtual voxel position of corner sample position
__device__
int3 HashDataStruct::SDFBlockToVirtualVoxelPos(const int3 &sdfBlock) const {
return sdfBlock * SDF_BLOCK_SIZE;
}
__device__
float3 HashDataStruct::virtualVoxelPosToWorld(const int3 &pos) const {
return make_float3(pos) * c_hashParams.m_virtualVoxelSize;
}
__device__
float3 HashDataStruct::SDFBlockToWorld(const int3 &sdfBlock) const {
return virtualVoxelPosToWorld(SDFBlockToVirtualVoxelPos(sdfBlock));
}
__device__
int3 HashDataStruct::worldToSDFBlock(const float3 &worldPos) const {
return virtualVoxelPosToSDFBlock(worldToVirtualVoxelPos(worldPos));
}
__device__
bool HashDataStruct::isSDFBlockInCameraFrustumApprox(const int3 &sdfBlock, CUDAFrame &frame) {
float3 posWorld = virtualVoxelPosToWorld(SDFBlockToVirtualVoxelPos(sdfBlock)) +
c_hashParams.m_virtualVoxelSize * 0.5f * (SDF_BLOCK_SIZE - 1.0f);
return frame.isInCameraFrustumApprox(posWorld);
}
//! computes the (local) virtual voxel pos of an index; idx in [0;511]
__device__
uint3 HashDataStruct::delinearizeVoxelIndex(uint idx) const {
uint x = idx % SDF_BLOCK_SIZE;
uint y = (idx % (SDF_BLOCK_SIZE * SDF_BLOCK_SIZE)) / SDF_BLOCK_SIZE;
uint z = idx / (SDF_BLOCK_SIZE * SDF_BLOCK_SIZE);
return make_uint3(x, y, z);
}
//! computes the linearized index of a local virtual voxel pos; pos in [0;7]^3
__device__
uint HashDataStruct::linearizeVoxelPos(const int3 &virtualVoxelPos) const {
return
virtualVoxelPos.z * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE +
virtualVoxelPos.y * SDF_BLOCK_SIZE +
virtualVoxelPos.x;
}
__device__
int HashDataStruct::virtualVoxelPosToLocalSDFBlockIndex(const int3 &virtualVoxelPos) const {
int3 localVoxelPos = make_int3(
virtualVoxelPos.x % SDF_BLOCK_SIZE,
virtualVoxelPos.y % SDF_BLOCK_SIZE,
virtualVoxelPos.z % SDF_BLOCK_SIZE);
if (localVoxelPos.x < 0) localVoxelPos.x += SDF_BLOCK_SIZE;
if (localVoxelPos.y < 0) localVoxelPos.y += SDF_BLOCK_SIZE;
if (localVoxelPos.z < 0) localVoxelPos.z += SDF_BLOCK_SIZE;
return linearizeVoxelPos(localVoxelPos);
}
__device__
int HashDataStruct::worldToLocalSDFBlockIndex(const float3 &world) const {
int3 virtualVoxelPos = worldToVirtualVoxelPos(world);
return virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos);
}
//! returns the hash entry for a given worldPos; if there was no hash entry the returned entry will have a ptr with FREE_ENTRY set
__device__
HashEntry HashDataStruct::getHashEntry(const float3 &worldPos) const {
//int3 blockID = worldToSDFVirtualVoxelPos(worldPos)/SDF_BLOCK_SIZE; //position of sdf block
int3 blockID = worldToSDFBlock(worldPos);
return getHashEntryForSDFBlockPos(blockID);
}
__device__
void HashDataStruct::deleteHashEntry(uint id) {
deleteHashEntry(d_hash[id]);
}
__device__
void HashDataStruct::deleteHashEntry(HashEntry &hashEntry) {
hashEntry.pos = make_int3(0);
hashEntry.offset = 0;
hashEntry.ptr = FREE_ENTRY;
}
__device__
bool HashDataStruct::voxelExists(const float3 &worldPos) const {
HashEntry hashEntry = getHashEntry(worldPos);
return (hashEntry.ptr != FREE_ENTRY);
}
__device__
void HashDataStruct::deleteVoxel(Voxel &v) const {
v.color = make_uchar4(0, 0, 0, 0);
v.weight = 0.0f;
v.sdf = 0.0f;
}
__device__
void HashDataStruct::deleteVoxel(uint id) {
deleteVoxel(d_SDFBlocks[id]);
}
__device__
Voxel HashDataStruct::getVoxel(const float3 &worldPos) const {
HashEntry hashEntry = getHashEntry(worldPos);
Voxel v;
if (hashEntry.ptr == FREE_ENTRY) {
deleteVoxel(v);
} else {
int3 virtualVoxelPos = worldToVirtualVoxelPos(worldPos);
v = d_SDFBlocks[hashEntry.ptr + virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos)];
}
return v;
}
__device__
Voxel HashDataStruct::getVoxel(const int3 &virtualVoxelPos) const {
HashEntry hashEntry = getHashEntryForSDFBlockPos(virtualVoxelPosToSDFBlock(virtualVoxelPos));
Voxel v;
if (hashEntry.ptr == FREE_ENTRY) {
deleteVoxel(v);
} else {
v = d_SDFBlocks[hashEntry.ptr + virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos)];
}
return v;
}
__device__
void HashDataStruct::setVoxel(const int3 &virtualVoxelPos, Voxel &voxelInput) const {
HashEntry hashEntry = getHashEntryForSDFBlockPos(virtualVoxelPosToSDFBlock(virtualVoxelPos));
if (hashEntry.ptr != FREE_ENTRY) {
d_SDFBlocks[hashEntry.ptr + virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos)] = voxelInput;
}
}
//! returns the hash entry for a given sdf block id; if there was no hash entry the returned entry will have a ptr with FREE_ENTRY set
__device__
HashEntry HashDataStruct::getHashEntryForSDFBlockPos(const int3 &sdfBlock) const {
uint h = computeHashPos(sdfBlock); //hash bucket
uint hp = h * HASH_BUCKET_SIZE; //hash position
HashEntry entry;
entry.pos = sdfBlock;
entry.offset = 0;
entry.ptr = FREE_ENTRY;
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
HashEntry curr = d_hash[i];
if (curr.pos.x == entry.pos.x && curr.pos.y == entry.pos.y && curr.pos.z == entry.pos.z &&
curr.ptr != FREE_ENTRY) {
return curr;
}
}
#ifdef HANDLE_COLLISIONS
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1;
int i = idxLastEntryInBucket; //start with the last entry of the current bucket
HashEntry curr;
//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
curr = d_hash[i];
if (curr.pos.x == entry.pos.x && curr.pos.y == entry.pos.y && curr.pos.z == entry.pos.z &&
curr.ptr != FREE_ENTRY) {
return curr;
}
if (curr.offset == 0) { //we have found the end of the list
break;
}
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
#endif
return entry;
}
//for histogram (no collision traversal)
__device__
unsigned int HashDataStruct::getNumHashEntriesPerBucket(unsigned int bucketID) {
unsigned int h = 0;
for (uint i = 0; i < HASH_BUCKET_SIZE; i++) {
if (d_hash[bucketID * HASH_BUCKET_SIZE + i].ptr != FREE_ENTRY) {
h++;
}
}
return h;
}
//for histogram (collisions traversal only)
__device__
unsigned int HashDataStruct::getNumHashLinkedList(unsigned int bucketID) {
unsigned int listLen = 0;
#ifdef HANDLE_COLLISIONS
const uint idxLastEntryInBucket = (bucketID + 1) * HASH_BUCKET_SIZE - 1;
unsigned int i = idxLastEntryInBucket; //start with the last entry of the current bucket
//int offset = 0;
HashEntry curr;
curr.offset = 0;
//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
//offset = curr.offset;
//curr = getHashEntry(g_Hash, i);
curr = d_hash[i];
if (curr.offset == 0) { //we have found the end of the list
break;
}
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
listLen++;
maxIter++;
}
#endif
return listLen;
}
__device__
uint HashDataStruct::consumeHeap() {
uint addr = atomicSub(&d_heapCounter[0], 1);
//TODO MATTHIAS check some error handling?
return d_heap[addr];
}
__device__
void HashDataStruct::appendHeap(uint ptr) {
uint addr = atomicAdd(&d_heapCounter[0], 1);
//TODO MATTHIAS check some error handling?
d_heap[addr + 1] = ptr;
}
//pos in SDF block coordinates
__device__
void HashDataStruct::allocBlock(const int3 &pos) {
uint h = computeHashPos(pos); //hash bucket
uint hp = h * HASH_BUCKET_SIZE; //hash position
int firstEmpty = -1;
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
const HashEntry &curr = d_hash[i];
//in that case the SDF-block is already allocated and corresponds to the current position -> exit thread
if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
return;
}
//store the first FREE_ENTRY hash entry
if (firstEmpty == -1 && curr.ptr == FREE_ENTRY) {
firstEmpty = i;
}
}
#ifdef HANDLE_COLLISIONS
//updated variables as after the loop
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1; //get last index of bucket
uint i = idxLastEntryInBucket; //start with the last entry of the current bucket
//int offset = 0;
HashEntry curr;
curr.offset = 0;
//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
//int k = 0;
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
//offset = curr.offset;
curr = d_hash[i]; //TODO MATTHIAS do by reference
if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
return;
}
if (curr.offset == 0) { //we have found the end of the list
break;
}
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
#endif
if (firstEmpty != -1) { //if there is an empty entry and we haven't allocated the current entry before
//int prevValue = 0;
//InterlockedExchange(d_hashBucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue != LOCK_ENTRY) { //only proceed if the bucket has been locked
HashEntry &entry = d_hash[firstEmpty];
entry.pos = pos;
entry.offset = NO_OFFSET;
long index = consumeHeap();
entry.ptr = index * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE; //memory alloc
}
return;
}
#ifdef HANDLE_COLLISIONS
//if (i != idxLastEntryInBucket) return;
int offset = 0;
//linear search for free entry
maxIter = 0;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
offset++;
i = (idxLastEntryInBucket + offset) %
(HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //go to next hash element
if ((offset % HASH_BUCKET_SIZE) == 0)
continue; //cannot insert into a last bucket element (would conflict with other linked lists)
curr = d_hash[i];
//if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
// return;
//}
if (curr.ptr == FREE_ENTRY) { //this is the first free entry
//int prevValue = 0;
//InterlockedExchange(g_HashBucketMutex[h], LOCK_ENTRY, prevValue); //lock the original hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue != LOCK_ENTRY) {
HashEntry lastEntryInBucket = d_hash[idxLastEntryInBucket];
h = i / HASH_BUCKET_SIZE;
//InterlockedExchange(g_HashBucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket where we have found a free entry
prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue != LOCK_ENTRY) { //only proceed if the bucket has been locked
HashEntry &entry = d_hash[i];
entry.pos = pos;
entry.offset = lastEntryInBucket.offset;
long index = consumeHeap();
entry.ptr = index * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE; //memory alloc
lastEntryInBucket.offset = offset;
d_hash[idxLastEntryInBucket] = lastEntryInBucket;
//setHashEntry(g_Hash, idxLastEntryInBucket, lastEntryInBucket);
}
}
return; //bucket was already locked
}
maxIter++;
}
#endif
}
//!inserts a hash entry without allocating any memory: used by streaming: TODO MATTHIAS check the atomics in this function
__device__
bool HashDataStruct::insertHashEntry(HashEntry entry) {
uint h = computeHashPos(entry.pos);
uint hp = h * HASH_BUCKET_SIZE;
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
//const HashEntry& curr = d_hash[i];
int prevWeight = 0;
//InterlockedCompareExchange(hash[3*i+2], FREE_ENTRY, LOCK_ENTRY, prevWeight);
prevWeight = atomicCAS(&d_hash[i].ptr, FREE_ENTRY, LOCK_ENTRY);
if (prevWeight == FREE_ENTRY) {
d_hash[i] = entry;
//setHashEntry(hash, i, entry);
return true;
}
}
#ifdef HANDLE_COLLISIONS
//updated variables as after the loop
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1; //get last index of bucket
uint i = idxLastEntryInBucket; //start with the last entry of the current bucket
HashEntry curr;
unsigned int maxIter = 0;
//[allow_uav_condition]
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
    while (maxIter < g_MaxLoopIterCount) { //traverse list until end // why find the end? we are inserting at the start !!!
//curr = getHashEntry(hash, i);
curr = d_hash[i]; //TODO MATTHIAS do by reference
if (curr.offset == 0) break; //we have found the end of the list
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
maxIter = 0;
int offset = 0;
#pragma unroll 1
while (maxIter <
g_MaxLoopIterCount) { //linear search for free entry
offset++;
uint i = (idxLastEntryInBucket + offset) %
(HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //go to next hash element
if ((offset % HASH_BUCKET_SIZE) == 0)
continue; //cannot insert into a last bucket element (would conflict with other linked lists)
int prevWeight = 0;
//InterlockedCompareExchange(hash[3*i+2], FREE_ENTRY, LOCK_ENTRY, prevWeight); //check for a free entry
uint *d_hashUI = (uint *) d_hash;
        prevWeight = atomicCAS(&d_hashUI[3 * idxLastEntryInBucket + 1], (uint) FREE_ENTRY, (uint) LOCK_ENTRY);
if (prevWeight ==
FREE_ENTRY) { //if free entry found set prev->next = curr & curr->next = prev->next
//[allow_uav_condition]
            //while(hash[3*idxLastEntryInBucket+2] == LOCK_ENTRY); // expects setHashEntry to set the ptr last, required because pos.z is packed into the same value -> prev->next = curr -> might corrupt pos.z
HashEntry lastEntryInBucket = d_hash[idxLastEntryInBucket]; //get prev (= lastEntry in Bucket)
int newOffsetPrev =
(offset << 16) | (lastEntryInBucket.pos.z & 0x0000ffff); //prev->next = curr (maintain old z-pos)
int oldOffsetPrev = 0;
//InterlockedExchange(hash[3*idxLastEntryInBucket+1], newOffsetPrev, oldOffsetPrev); //set prev offset atomically
uint *d_hashUI = (uint *) d_hash;
            oldOffsetPrev = atomicExch(&d_hashUI[3 * idxLastEntryInBucket + 1], newOffsetPrev);
            entry.offset = oldOffsetPrev >> 16; //remove prev z-pos from old offset
//setHashEntry(hash, i, entry); //sets the current hashEntry with: curr->next = prev->next
d_hash[i] = entry;
return true;
}
maxIter++;
}
#endif
return false;
}
//! deletes a hash entry position for a given sdfBlock index (returns true upon successful deletion; otherwise returns false)
__device__
bool HashDataStruct::deleteHashEntryElement(const int3 &sdfBlock) {
uint h = computeHashPos(sdfBlock); //hash bucket
uint hp = h * HASH_BUCKET_SIZE; //hash position
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
const HashEntry &curr = d_hash[i];
if (curr.pos.x == sdfBlock.x && curr.pos.y == sdfBlock.y && curr.pos.z == sdfBlock.z &&
curr.ptr != FREE_ENTRY) {
#ifndef HANDLE_COLLISIONS
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
deleteHashEntry(i);
return true;
#endif
#ifdef HANDLE_COLLISIONS
if (curr.offset != 0) { //if there was a pointer set it to the next list element
//int prevValue = 0;
//InterlockedExchange(bucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue == LOCK_ENTRY) return false;
if (prevValue != LOCK_ENTRY) {
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
int nextIdx = (i + curr.offset) % (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets);
//setHashEntry(hash, i, getHashEntry(hash, nextIdx));
d_hash[i] = d_hash[nextIdx];
deleteHashEntry(nextIdx);
return true;
}
} else {
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
deleteHashEntry(i);
return true;
}
#endif //HANDLE_COLLISIONS
}
}
#ifdef HANDLE_COLLISIONS
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1;
int i = idxLastEntryInBucket;
HashEntry curr;
curr = d_hash[i];
int prevIdx = i;
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
curr = d_hash[i];
//found that dude that we need/want to delete
if (curr.pos.x == sdfBlock.x && curr.pos.y == sdfBlock.y && curr.pos.z == sdfBlock.z &&
curr.ptr != FREE_ENTRY) {
//int prevValue = 0;
//InterlockedExchange(bucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue == LOCK_ENTRY) return false;
if (prevValue != LOCK_ENTRY) {
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
deleteHashEntry(i);
HashEntry prev = d_hash[prevIdx];
prev.offset = curr.offset;
//setHashEntry(hash, prevIdx, prev);
d_hash[prevIdx] = prev;
return true;
}
}
if (curr.offset == 0) { //we have found the end of the list
return false; //should actually never happen because we need to find that guy before
}
prevIdx = i;
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
#endif // HANDLE_COLLISIONS
return false;
}
#define T_PER_BLOCK 16
texture<float, hipTextureType2D, hipReadModeElementType> depthTextureRef;
texture<uchar4, hipTextureType2D, hipReadModeElementType> colorTextureRef;
void bindInputDepthColorTextures(const CUDAFrame& frame)
{
int width = frame.imageWidth, height = frame.imageHeight;
cutilSafeCall(hipBindTexture2D(0, &depthTextureRef, frame.depthData, &depthTextureRef.channelDesc, width, height, sizeof(float)*width));
cutilSafeCall(hipBindTexture2D(0, &colorTextureRef, frame.colorData, &colorTextureRef.channelDesc, width, height, sizeof(uchar4)*width));
depthTextureRef.filterMode = hipFilterModePoint;
colorTextureRef.filterMode = hipFilterModePoint;
}
__global__ void resetHeapKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx == 0) {
hashData.d_heapCounter[0] = hashParams.m_numSDFBlocks - 1; //points to the last element of the array
}
if (idx < hashParams.m_numSDFBlocks) {
hashData.d_heap[idx] = hashParams.m_numSDFBlocks - idx - 1;
uint blockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
uint base_idx = idx * blockSize;
for (uint i = 0; i < blockSize; i++) {
hashData.deleteVoxel(base_idx+i);
}
}
}
__global__ void resetHashKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
hashData.deleteHashEntry(hashData.d_hash[idx]);
hashData.deleteHashEntry(hashData.d_hashCompactified[idx]);
}
}
__global__ void resetHashBucketMutexKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets) {
hashData.d_hashBucketMutex[idx] = FREE_ENTRY;
}
}
void resetCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
{
//resetting the heap and SDF blocks
const dim3 gridSize((hashParams.m_numSDFBlocks + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
hipLaunchKernelGGL(( resetHeapKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
{
//resetting the hash
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
hipLaunchKernelGGL(( resetHashKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
{
//resetting the mutex
const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
hipLaunchKernelGGL(( resetHashBucketMutexKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
}
void resetHashBucketMutexCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
hipLaunchKernelGGL(( resetHashBucketMutexKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
__device__
unsigned int linearizeChunkPos(const int3& chunkPos)
{
int3 p = chunkPos-c_hashParams.m_streamingMinGridPos;
return p.z * c_hashParams.m_streamingGridDimensions.x * c_hashParams.m_streamingGridDimensions.y +
p.y * c_hashParams.m_streamingGridDimensions.x +
p.x;
}
__device__
int3 worldToChunks(const float3& posWorld)
{
float3 p;
p.x = posWorld.x/c_hashParams.m_streamingVoxelExtents.x;
p.y = posWorld.y/c_hashParams.m_streamingVoxelExtents.y;
p.z = posWorld.z/c_hashParams.m_streamingVoxelExtents.z;
float3 s;
s.x = (float)sign(p.x);
s.y = (float)sign(p.y);
s.z = (float)sign(p.z);
return make_int3(p+s*0.5f);
}
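// Allocation pass: for every pixel with a valid depth sample, march a ray
// through the truncation band [d - t, d + t] with a 3D DDA over SDF blocks and
// allocate every block the ray touches that lies in the camera frustum.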
__global__ void allocKernel(HashDataStruct hashData, CUDAFrame frame)
{
const HashParams& hashParams = c_hashParams;
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < frame.imageWidth && y < frame.imageHeight)
{
float d = tex2D(depthTextureRef, x, y);
if (d == MINF || d == 0.0f) return;
if (d >= hashParams.m_maxIntegrationDistance) return;
float t = hashData.getTruncation(d);
float minDepth = min(hashParams.m_maxIntegrationDistance, d-t);
float maxDepth = min(hashParams.m_maxIntegrationDistance, d+t);
if (minDepth >= maxDepth) return;
float3 rayMin = frame.unProject(x, y, minDepth);
rayMin = hashParams.m_rigidTransform * rayMin;
float3 rayMax = frame.unProject(x, y, maxDepth);
rayMax = hashParams.m_rigidTransform * rayMax;
float3 rayDir = normalize(rayMax - rayMin);
int3 idCurrentVoxel = hashData.worldToSDFBlock(rayMin);
int3 idEnd = hashData.worldToSDFBlock(rayMax);
float3 step = make_float3(sign(rayDir));
float3 boundaryPos = hashData.SDFBlockToWorld(idCurrentVoxel+make_int3(clamp(step, 0.0, 1.0f)))-0.5f*hashParams.m_virtualVoxelSize;
float3 tMax = (boundaryPos-rayMin)/rayDir;
float3 tDelta = (step*SDF_BLOCK_SIZE*hashParams.m_virtualVoxelSize)/rayDir;
int3 idBound = make_int3(make_float3(idEnd)+step);
//#pragma unroll
//for(int c = 0; c < 3; c++) {
// if (rayDir[c] == 0.0f) { tMax[c] = PINF; tDelta[c] = PINF; }
// if (boundaryPos[c] - rayMin[c] == 0.0f) { tMax[c] = PINF; tDelta[c] = PINF; }
//}
if (rayDir.x == 0.0f) { tMax.x = PINF; tDelta.x = PINF; }
if (boundaryPos.x - rayMin.x == 0.0f) { tMax.x = PINF; tDelta.x = PINF; }
if (rayDir.y == 0.0f) { tMax.y = PINF; tDelta.y = PINF; }
if (boundaryPos.y - rayMin.y == 0.0f) { tMax.y = PINF; tDelta.y = PINF; }
if (rayDir.z == 0.0f) { tMax.z = PINF; tDelta.z = PINF; }
if (boundaryPos.z - rayMin.z == 0.0f) { tMax.z = PINF; tDelta.z = PINF; }
unsigned int iter = 0; // iter < g_MaxLoopIterCount
unsigned int g_MaxLoopIterCount = 1024; //TODO MATTHIAS MOVE TO GLOBAL APP STATE
#pragma unroll 1
while(iter < g_MaxLoopIterCount) {
//check if it's in the frustum and not checked out
if (hashData.isSDFBlockInCameraFrustumApprox(idCurrentVoxel, frame)) {
hashData.allocBlock(idCurrentVoxel);
}
// Traverse voxel grid
if(tMax.x < tMax.y && tMax.x < tMax.z) {
idCurrentVoxel.x += step.x;
if(idCurrentVoxel.x == idBound.x) return;
tMax.x += tDelta.x;
}
else if(tMax.z < tMax.y) {
idCurrentVoxel.z += step.z;
if(idCurrentVoxel.z == idBound.z) return;
tMax.z += tDelta.z;
}
else {
idCurrentVoxel.y += step.y;
if(idCurrentVoxel.y == idBound.y) return;
tMax.y += tDelta.y;
}
iter++;
}
}
}
void allocCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const dim3 gridSize((frame.imageWidth + T_PER_BLOCK - 1)/T_PER_BLOCK, (frame.imageHeight + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( allocKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
__global__ void fillDecisionArrayKernel(HashDataStruct hashData, CUDAFrame frame)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
hashData.d_hashDecision[idx] = 0;
if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
if (hashData.isSDFBlockInCameraFrustumApprox(hashData.d_hash[idx].pos, frame))
{
hashData.d_hashDecision[idx] = 1; //yes
}
}
}
}
void fillDecisionArrayCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
hipLaunchKernelGGL(( fillDecisionArrayKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
__global__ void compactifyHashKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
if (hashData.d_hashDecision[idx] == 1) {
hashData.d_hashCompactified[hashData.d_hashDecisionPrefix[idx]-1] = hashData.d_hash[idx];
}
}
}
void compactifyHashCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
hipLaunchKernelGGL(( compactifyHashKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
#define COMPACTIFY_HASH_THREADS_PER_BLOCK 256
//#define COMPACTIFY_HASH_SIMPLE
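// Single-pass compaction: each block first counts its visible hash entries via
// a shared-memory atomic, then reserves a contiguous range in d_hashCompactified
// with a single global atomic per block before writing the entries out.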
__global__ void compactifyHashAllInOneKernel(HashDataStruct hashData, CUDAFrame frame)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int localCounter;
if (threadIdx.x == 0) localCounter = 0;
__syncthreads();
int addrLocal = -1;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
if (hashData.isSDFBlockInCameraFrustumApprox(hashData.d_hash[idx].pos, frame))
{
addrLocal = atomicAdd(&localCounter, 1);
}
}
}
__syncthreads();
__shared__ int addrGlobal;
if (threadIdx.x == 0 && localCounter > 0) {
addrGlobal = atomicAdd(hashData.d_hashCompactifiedCounter, localCounter);
}
__syncthreads();
if (addrLocal != -1) {
const unsigned int addr = addrGlobal + addrLocal;
hashData.d_hashCompactified[addr] = hashData.d_hash[idx];
}
}
unsigned int compactifyHashAllInOneCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame &frame)
{
const unsigned int threadsPerBlock = COMPACTIFY_HASH_THREADS_PER_BLOCK;
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + threadsPerBlock - 1) / threadsPerBlock, 1);
const dim3 blockSize(threadsPerBlock, 1);
cutilSafeCall(hipMemset(hashData.d_hashCompactifiedCounter, 0, sizeof(int)));
    hipLaunchKernelGGL(( compactifyHashAllInOneKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, frame);
unsigned int res = 0;
cutilSafeCall(hipMemcpy(&res, hashData.d_hashCompactifiedCounter, sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_CHECKED_NO_ERROR();
return res;
}
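// (De-)integration pass: one thread block per compactified (visible) hash entry
// and one thread per voxel. Each voxel is projected into the current frame and
// the depth/color sample is folded into the voxel's running weighted average;
// with deIntegrate == true the same sample is subtracted again.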
template<bool deIntegrate = false>
__global__ void integrateDepthMapKernel(HashDataStruct hashData, CUDAFrame frame) {
const HashParams& hashParams = c_hashParams;
const HashEntry& entry = hashData.d_hashCompactified[blockIdx.x];
int3 pi_base = hashData.SDFBlockToVirtualVoxelPos(entry.pos);
uint i = threadIdx.x; //inside of an SDF block
int3 pi = pi_base + make_int3(hashData.delinearizeVoxelIndex(i));
float3 pf = hashData.virtualVoxelPosToWorld(pi);
pf = hashParams.m_rigidTransformInverse * pf;
float3 pixel = frame.project(pf);
uint2 screenPos = make_uint2((uint)pixel.x, (uint)pixel.y);
if (screenPos.x < frame.imageWidth && screenPos.y < frame.imageHeight) { //on screen
//float depth = g_InputDepth[screenPos];
float depth = tex2D(depthTextureRef, screenPos.x, screenPos.y);
uchar4 color_uc = tex2D(colorTextureRef, screenPos.x, screenPos.y);
float3 color = make_float3(color_uc.x, color_uc.y, color_uc.z);
if (color.x != MINF && depth != MINF) { // valid depth and color
//if (depth != MINF) { //valid depth
if (depth < hashParams.m_maxIntegrationDistance) {
float depthZeroOne = frame.cameraToProjZ(depth);
float sdf = depth - pf.z;
float truncation = hashData.getTruncation(depth);
//if (sdf > -truncation)
if (abs(sdf) < truncation)
{
if (sdf >= 0.0f) {
sdf = fminf(truncation, sdf);
} else {
sdf = fmaxf(-truncation, sdf);
}
float weightUpdate = max(hashParams.m_integrationWeightSample * 1.5f * (1.0f-depthZeroOne), 1.0f);
weightUpdate = 1.0f; //TODO remove that again
Voxel curr; //construct current voxel
curr.sdf = sdf;
curr.weight = weightUpdate;
curr.color = make_uchar4(color.x, color.y, color.z, 255);
uint idx = entry.ptr + i;
const Voxel& oldVoxel = hashData.d_SDFBlocks[idx];
Voxel newVoxel;
float3 oldColor = make_float3(oldVoxel.color.x, oldVoxel.color.y, oldVoxel.color.z);
float3 currColor = make_float3(curr.color.x, curr.color.y, curr.color.z);
if (!deIntegrate) { //integration
//hashData.combineVoxel(hashData.d_SDFBlocks[idx], curr, newVoxel);
float3 res;
if (oldVoxel.weight == 0) res = currColor;
//else res = (currColor + oldColor) / 2;
else res = 0.2f * currColor + 0.8f * oldColor;
//float3 res = (currColor*curr.weight + oldColor*oldVoxel.weight) / (curr.weight + oldVoxel.weight);
res = make_float3(round(res.x), round(res.y), round(res.z));
res = fmaxf(make_float3(0.0f), fminf(res, make_float3(254.5f)));
//newVoxel.color.x = (uchar)(res.x + 0.5f); newVoxel.color.y = (uchar)(res.y + 0.5f); newVoxel.color.z = (uchar)(res.z + 0.5f);
newVoxel.color = make_uchar4(res.x, res.y, res.z, 255);
newVoxel.sdf = (curr.sdf*curr.weight + oldVoxel.sdf*oldVoxel.weight) / (curr.weight + oldVoxel.weight);
newVoxel.weight = min((float)c_hashParams.m_integrationWeightMax, curr.weight + oldVoxel.weight);
}
else { //deintegration
//float3 res = 2 * c0 - c1;
float3 res = (oldColor*oldVoxel.weight - currColor*curr.weight) / (oldVoxel.weight - curr.weight);
res = make_float3(round(res.x), round(res.y), round(res.z));
res = fmaxf(make_float3(0.0f), fminf(res, make_float3(254.5f)));
//newVoxel.color.x = (uchar)(res.x + 0.5f); newVoxel.color.y = (uchar)(res.y + 0.5f); newVoxel.color.z = (uchar)(res.z + 0.5f);
newVoxel.color = make_uchar4(res.x, res.y, res.z, 255);
newVoxel.sdf = (oldVoxel.sdf*oldVoxel.weight - curr.sdf*curr.weight) / (oldVoxel.weight - curr.weight);
newVoxel.weight = max(0.0f, oldVoxel.weight - curr.weight);
if (newVoxel.weight <= 0.001f) {
newVoxel.sdf = 0.0f;
newVoxel.color = make_uchar4(0,0,0,0);
newVoxel.weight = 0.0f;
}
}
hashData.d_SDFBlocks[idx] = newVoxel;
}
}
}
}
}
void integrateDepthMapCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
hipLaunchKernelGGL(( integrateDepthMapKernel<false>) , dim3(gridSize), dim3(blockSize), 0, 0, hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
void deIntegrateDepthMapCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
hipLaunchKernelGGL(( integrateDepthMapKernel<true>) , dim3(gridSize), dim3(blockSize) , 0, 0, hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
__global__ void starveVoxelsKernel(HashDataStruct hashData) {
const uint idx = blockIdx.x;
const HashEntry& entry = hashData.d_hashCompactified[idx];
    //is typically executed only every n'th frame
int weight = hashData.d_SDFBlocks[entry.ptr + threadIdx.x].weight;
weight = max(0, weight-1);
hashData.d_SDFBlocks[entry.ptr + threadIdx.x].weight = weight;
}
void starveVoxelsKernelCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
hipLaunchKernelGGL(( starveVoxelsKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
//__shared__ float shared_MinSDF[SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE / 2];
__shared__ uint shared_MaxWeight[SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE / 2];
__global__ void garbageCollectIdentifyKernel(HashDataStruct hashData) {
const unsigned int hashIdx = blockIdx.x;
const HashEntry& entry = hashData.d_hashCompactified[hashIdx];
//uint h = hashData.computeHashPos(entry.pos);
//hashData.d_hashDecision[hashIdx] = 1;
//if (hashData.d_hashBucketMutex[h] == LOCK_ENTRY) return;
//if (entry.ptr == FREE_ENTRY) return; //should never happen since we did compactify before
//const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
const unsigned int idx0 = entry.ptr + 2*threadIdx.x+0;
const unsigned int idx1 = entry.ptr + 2*threadIdx.x+1;
Voxel v0 = hashData.d_SDFBlocks[idx0];
Voxel v1 = hashData.d_SDFBlocks[idx1];
//if (v0.weight == 0) v0.sdf = PINF;
//if (v1.weight == 0) v1.sdf = PINF;
//shared_MinSDF[threadIdx.x] = min(fabsf(v0.sdf), fabsf(v1.sdf)); //init shared memory
shared_MaxWeight[threadIdx.x] = max(v0.weight, v1.weight);
#pragma unroll 1
for (uint stride = 2; stride <= blockDim.x; stride <<= 1) {
__syncthreads();
if ((threadIdx.x & (stride-1)) == (stride-1)) {
//shared_MinSDF[threadIdx.x] = min(shared_MinSDF[threadIdx.x-stride/2], shared_MinSDF[threadIdx.x]);
shared_MaxWeight[threadIdx.x] = max(shared_MaxWeight[threadIdx.x-stride/2], shared_MaxWeight[threadIdx.x]);
}
}
__syncthreads();
if (threadIdx.x == blockDim.x - 1) {
uint maxWeight = shared_MaxWeight[threadIdx.x];
if (maxWeight == 0) {
hashData.d_hashDecision[hashIdx] = 1;
} else {
hashData.d_hashDecision[hashIdx] = 0;
}
}
}
void garbageCollectIdentifyCUDA(HashDataStruct& hashData, const HashParams& hashParams) {
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE / 2;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
hipLaunchKernelGGL(( garbageCollectIdentifyKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
__global__ void garbageCollectFreeKernel(HashDataStruct hashData) {
//const uint hashIdx = blockIdx.x;
const uint hashIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (hashIdx < c_hashParams.m_numOccupiedBlocks && hashData.d_hashDecision[hashIdx] != 0) { //decision to delete the hash entry
const HashEntry& entry = hashData.d_hashCompactified[hashIdx];
//if (entry.ptr == FREE_ENTRY) return; //should never happen since we did compactify before
if (hashData.deleteHashEntryElement(entry.pos)) { //delete hash entry from hash (and performs heap append)
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
#pragma unroll 1
for (uint i = 0; i < linBlockSize; i++) { //clear sdf block: CHECK TODO another kernel?
hashData.deleteVoxel(entry.ptr + i);
}
}
}
}
void garbageCollectFreeCUDA(HashDataStruct& hashData, const HashParams& hashParams) {
const unsigned int threadsPerBlock = T_PER_BLOCK*T_PER_BLOCK;
const dim3 gridSize((hashParams.m_numOccupiedBlocks + threadsPerBlock - 1) / threadsPerBlock, 1);
const dim3 blockSize(threadsPerBlock, 1);
hipLaunchKernelGGL(( garbageCollectFreeKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData);
CUDA_CHECKED_NO_ERROR();
}
/** raycast */
__device__ __host__
RayCastData::RayCastData() {
d_depth = NULL;
d_depth4 = NULL;
d_normals = NULL;
d_colors = NULL;
d_vertexBuffer = NULL;
d_rayIntervalSplatMinArray = NULL;
d_rayIntervalSplatMaxArray = NULL;
}
extern "C" void updateConstantRayCastParams(const RayCastParams& params) {
size_t size;
cutilSafeCall(hipGetSymbolSize(&size, c_rayCastParams));
cutilSafeCall(hipMemcpyToSymbol(c_rayCastParams, ¶ms, size, 0, hipMemcpyHostToDevice));
#ifdef DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__host__
void RayCastData::updateParams(const RayCastParams ¶ms) {
updateConstantRayCastParams(params);
}
/////////////////
// Device part //
/////////////////
__device__
const RayCastParams &RayCastData::params() const {
return c_rayCastParams;
}
__device__
float RayCastData::frac(float val) const {
return (val - floorf(val));
}
__device__
float3 RayCastData::frac(const float3 &val) const {
return make_float3(frac(val.x), frac(val.y), frac(val.z));
}
__device__
bool RayCastData::trilinearInterpolationSimpleFastFast(const HashDataStruct &hash, const float3 &pos, float &dist,
uchar3 &color) const {
const float oSet = c_hashParams.m_virtualVoxelSize;
const float3 posDual = pos - make_float3(oSet / 2.0f, oSet / 2.0f, oSet / 2.0f);
float3 weight = frac(hash.worldToVirtualVoxelPosFloat(pos));
dist = 0.0f;
float3 colorFloat = make_float3(0.0f, 0.0f, 0.0f);
Voxel v = hash.getVoxel(posDual + make_float3(0.0f, 0.0f, 0.0f));
if (v.weight == 0) return false;
float3 vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * (1.0f - weight.y) * (1.0f - weight.z) * v.sdf;
colorFloat += (1.0f - weight.x) * (1.0f - weight.y) * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, 0.0f, 0.0f));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * (1.0f - weight.y) * (1.0f - weight.z) * v.sdf;
colorFloat += weight.x * (1.0f - weight.y) * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(0.0f, oSet, 0.0f));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * weight.y * (1.0f - weight.z) * v.sdf;
colorFloat += (1.0f - weight.x) * weight.y * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(0.0f, 0.0f, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * (1.0f - weight.y) * weight.z * v.sdf;
colorFloat += (1.0f - weight.x) * (1.0f - weight.y) * weight.z * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, oSet, 0.0f));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * weight.y * (1.0f - weight.z) * v.sdf;
colorFloat += weight.x * weight.y * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(0.0f, oSet, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * weight.y * weight.z * v.sdf;
colorFloat += (1.0f - weight.x) * weight.y * weight.z * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, 0.0f, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * (1.0f - weight.y) * weight.z * v.sdf;
colorFloat += weight.x * (1.0f - weight.y) * weight.z * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, oSet, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * weight.y * weight.z * v.sdf;
colorFloat += weight.x * weight.y * weight.z * vColor;
color = make_uchar3(colorFloat.x, colorFloat.y, colorFloat.z);//v.color;
return true;
}
__device__
float RayCastData::findIntersectionLinear(float tNear, float tFar, float dNear, float dFar) const {
return tNear + (dNear / (dNear - dFar)) * (tFar - tNear);
}
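// Derivation sketch: assuming the SDF varies linearly between the two samples,
// d(t) = dNear + (t - tNear) * (dFar - dNear) / (tFar - tNear); solving
// d(t) = 0 for t gives t = tNear + dNear / (dNear - dFar) * (tFar - tNear),
// which is exactly the expression returned above.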
// d0 near, d1 far
__device__
bool RayCastData::findIntersectionBisection(const HashDataStruct &hash, const float3 &worldCamPos, const float3 &worldDir,
float d0, float r0, float d1, float r1, float &alpha, uchar3 &color) const {
float a = r0;
float aDist = d0;
float b = r1;
float bDist = d1;
float c = 0.0f;
#pragma unroll 1
for (uint i = 0; i < nIterationsBisection; i++) {
c = findIntersectionLinear(a, b, aDist, bDist);
float cDist;
if (!trilinearInterpolationSimpleFastFast(hash, worldCamPos + c * worldDir, cDist, color)) return false;
if (aDist * cDist > 0.0) {
a = c;
aDist = cDist;
}
else {
b = c;
bDist = cDist;
}
}
alpha = c;
return true;
}
__device__
float3 RayCastData::gradientForPoint(const HashDataStruct &hash, const float3 &pos) const {
const float voxelSize = c_hashParams.m_virtualVoxelSize;
float3 offset = make_float3(voxelSize, voxelSize, voxelSize);
float distp00;
uchar3 colorp00;
trilinearInterpolationSimpleFastFast(hash, pos - make_float3(0.5f * offset.x, 0.0f, 0.0f), distp00, colorp00);
float dist0p0;
uchar3 color0p0;
trilinearInterpolationSimpleFastFast(hash, pos - make_float3(0.0f, 0.5f * offset.y, 0.0f), dist0p0, color0p0);
float dist00p;
uchar3 color00p;
trilinearInterpolationSimpleFastFast(hash, pos - make_float3(0.0f, 0.0f, 0.5f * offset.z), dist00p, color00p);
float dist100;
uchar3 color100;
trilinearInterpolationSimpleFastFast(hash, pos + make_float3(0.5f * offset.x, 0.0f, 0.0f), dist100, color100);
float dist010;
uchar3 color010;
trilinearInterpolationSimpleFastFast(hash, pos + make_float3(0.0f, 0.5f * offset.y, 0.0f), dist010, color010);
float dist001;
uchar3 color001;
trilinearInterpolationSimpleFastFast(hash, pos + make_float3(0.0f, 0.0f, 0.5f * offset.z), dist001, color001);
float3 grad = make_float3((distp00 - dist100) / offset.x, (dist0p0 - dist010) / offset.y,
(dist00p - dist001) / offset.z);
float l = length(grad);
if (l == 0.0f) {
return make_float3(0.0f, 0.0f, 0.0f);
}
return -grad / l;
}
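// Ray marching over the voxel hash: advance along the ray in steps of
// m_rayIncrement, trilinearly sampling the TSDF; when consecutive samples
// change sign from positive to negative, refine the zero crossing by bisection
// and write depth, camera-space position, color and (optionally) normal for
// this pixel.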
__device__
void RayCastData::traverseCoarseGridSimpleSampleAll(const HashDataStruct& hash, const float3& worldCamPos, const float3& worldDir, const float3& camDir, const int3& dTid, float minInterval, float maxInterval) const
{
const RayCastParams& rayCastParams = c_rayCastParams;
// Last Sample
RayCastSample lastSample; lastSample.sdf = 0.0f; lastSample.alpha = 0.0f; lastSample.weight = 0; // lastSample.color = int3(0, 0, 0);
const float depthToRayLength = 1.0f/camDir.z; // scale factor to convert from depth to ray length
float rayCurrent = depthToRayLength * max(rayCastParams.m_minDepth, minInterval); // Convert depth to raylength
float rayEnd = depthToRayLength * min(rayCastParams.m_maxDepth, maxInterval); // Convert depth to raylength
//float rayCurrent = depthToRayLength * rayCastParams.m_minDepth; // Convert depth to raylength
//float rayEnd = depthToRayLength * rayCastParams.m_maxDepth; // Convert depth to raylength
#pragma unroll 1
while(rayCurrent < rayEnd)
{
float3 currentPosWorld = worldCamPos+rayCurrent*worldDir;
float dist; uchar3 color;
if(trilinearInterpolationSimpleFastFast(hash, currentPosWorld, dist, color))
{
if(lastSample.weight > 0 && lastSample.sdf > 0.0f && dist < 0.0f)// current sample is always valid here
//if(lastSample.weight > 0 && ((lastSample.sdf > 0.0f && dist < 0.0f) || (lastSample.sdf < 0.0f && dist > 0.0f))) //hack for top down video
{
float alpha; // = findIntersectionLinear(lastSample.alpha, rayCurrent, lastSample.sdf, dist);
uchar3 color2;
bool b = findIntersectionBisection(hash, worldCamPos, worldDir, lastSample.sdf, lastSample.alpha, dist, rayCurrent, alpha, color2);
float3 currentIso = worldCamPos+alpha*worldDir;
if(b && abs(lastSample.sdf - dist) < rayCastParams.m_thresSampleDist)
{
if(abs(dist) < rayCastParams.m_thresDist)
{
float depth = alpha / depthToRayLength; // Convert ray length to depth depthToRayLength
d_depth[dTid.y*rayCastParams.m_width+dTid.x] = depth;
d_depth4[dTid.y*rayCastParams.m_width+dTid.x] = make_float4(depthToCamera(dTid.x, dTid.y, depth), 1.0f);
d_colors[dTid.y*rayCastParams.m_width+dTid.x] = make_float4(color2.x/255.f, color2.y/255.f, color2.z/255.f, 1.0f);
if(rayCastParams.m_useGradients)
{
float3 normal = make_float3(0,0,0)-gradientForPoint(hash, currentIso);
float4 n = rayCastParams.m_viewMatrix * make_float4(normal, 0.0f);
d_normals[dTid.y*rayCastParams.m_width+dTid.x] = make_float4(n.x, n.y, n.z, 1.0f);
}
return;
}
}
}
lastSample.sdf = dist;
lastSample.alpha = rayCurrent;
// lastSample.color = color;
lastSample.weight = 1;
rayCurrent += rayCastParams.m_rayIncrement;
} else {
lastSample.weight = 0;
rayCurrent += rayCastParams.m_rayIncrement;
}
}
}
__global__ void computeNormalsDevice(float4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= width || y >= height) return;
d_output[y*width+x] = make_float4(MINF, MINF, MINF, MINF);
if(x > 0 && x < width-1 && y > 0 && y < height-1)
{
const float4 CC = d_input[(y+0)*width+(x+0)];
const float4 PC = d_input[(y+1)*width+(x+0)];
const float4 CP = d_input[(y+0)*width+(x+1)];
const float4 MC = d_input[(y-1)*width+(x+0)];
const float4 CM = d_input[(y+0)*width+(x-1)];
if(CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF)
{
const float3 n = cross(make_float3(PC)-make_float3(MC), make_float3(CP)-make_float3(CM));
const float l = length(n);
if(l > 0.0f)
{
d_output[y*width+x] = make_float4(n/-l, 1.0f);
}
}
}
}
extern "C" void computeNormals(float4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1)/T_PER_BLOCK, (height + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( computeNormalsDevice), dim3(gridSize), dim3(blockSize), 0, 0, d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
texture<float, hipTextureType2D, hipReadModeElementType> rayMinTextureRef;
texture<float, hipTextureType2D, hipReadModeElementType> rayMaxTextureRef;
__global__ void renderKernel(HashDataStruct hashData, RayCastData rayCastData)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
const RayCastParams& rayCastParams = c_rayCastParams;
if (x < rayCastParams.m_width && y < rayCastParams.m_height) {
rayCastData.d_depth[y*rayCastParams.m_width+x] = MINF;
rayCastData.d_depth4[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
rayCastData.d_normals[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
rayCastData.d_colors[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
float3 camDir = normalize(RayCastData::depthToCamera(x, y, 1.0f));
float3 worldCamPos = rayCastParams.m_viewMatrixInverse * make_float3(0.0f, 0.0f, 0.0f);
float4 w = rayCastParams.m_viewMatrixInverse * make_float4(camDir, 0.0f);
float3 worldDir = normalize(make_float3(w.x, w.y, w.z));
float minInterval = tex2D(rayMinTextureRef, x, y);
float maxInterval = tex2D(rayMaxTextureRef, x, y);
//float minInterval = rayCastParams.m_minDepth;
//float maxInterval = rayCastParams.m_maxDepth;
//if (minInterval == 0 || minInterval == MINF) minInterval = rayCastParams.m_minDepth;
//if (maxInterval == 0 || maxInterval == MINF) maxInterval = rayCastParams.m_maxDepth;
//TODO MATTHIAS: shouldn't this return in the case no interval is found?
if (minInterval == 0 || minInterval == MINF) return;
if (maxInterval == 0 || maxInterval == MINF) return;
minInterval = max(minInterval, rayCastParams.m_minDepth);
maxInterval = min(maxInterval, rayCastParams.m_maxDepth);
// debugging
//if (maxInterval < minInterval) {
// printf("ERROR (%d,%d): [ %f, %f ]\n", x, y, minInterval, maxInterval);
//}
rayCastData.traverseCoarseGridSimpleSampleAll(hashData, worldCamPos, worldDir, camDir, make_int3(x,y,1), minInterval, maxInterval);
}
}
extern "C" void renderCS(const HashDataStruct& hashData, const RayCastData &rayCastData, const RayCastParams &rayCastParams)
{
const dim3 gridSize((rayCastParams.m_width + T_PER_BLOCK - 1)/T_PER_BLOCK, (rayCastParams.m_height + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipBindTexture2D(0, &rayMinTextureRef, rayCastData.d_rayIntervalSplatMinArray, &depthTextureRef.channelDesc, rayCastParams.m_width, rayCastParams.m_height, sizeof(float)*rayCastParams.m_width);
hipBindTexture2D(0, &rayMaxTextureRef, rayCastData.d_rayIntervalSplatMaxArray, &depthTextureRef.channelDesc, rayCastParams.m_width, rayCastParams.m_height, sizeof(float)*rayCastParams.m_width);
hipLaunchKernelGGL(( renderKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, rayCastData);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
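// Usage sketch (hypothetical host-side call order; the actual driver code lives outside this file):
//   params.m_splatMinimum = 1; rayCastData.updateParams(params);
//   rayIntervalSplatCUDA(hashData, rayCastData, params);   // fills d_rayIntervalSplatMinArray
//   params.m_splatMinimum = 0; rayCastData.updateParams(params);
//   rayIntervalSplatCUDA(hashData, rayCastData, params);   // fills d_rayIntervalSplatMaxArray
//   renderCS(hashData, rayCastData, params);                // ray-marches each pixel within [min,max]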
/////////////////////////////////////////////////////////////////////////
// ray interval splatting
/////////////////////////////////////////////////////////////////////////
__global__ void resetRayIntervalSplatKernel(RayCastData data)
{
uint idx = blockIdx.x + blockIdx.y * NUM_GROUPS_X;
data.d_vertexBuffer[idx] = make_float4(MINF);
}
extern "C" void resetRayIntervalSplatCUDA(RayCastData& data, const RayCastParams& params)
{
const dim3 gridSize(NUM_GROUPS_X, (params.m_maxNumVertices + NUM_GROUPS_X - 1) / NUM_GROUPS_X, 1); //! TODO: check whether the third dimension is needed
const dim3 blockSize(1, 1, 1);
hipLaunchKernelGGL(( resetRayIntervalSplatKernel), dim3(gridSize), dim3(blockSize), 0, 0, data);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
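// rayIntervalSplatKernel: one thread per compactified hash entry. The eight corners of the
// entry's SDF block are projected into the depth image, a screen-space bounding rectangle is
// built from the per-axis min/max, and the block's near (or far, depending on m_splatMinimum)
// depth is written into every pixel of that rectangle. The resulting min/max images bound the
// ray-march interval used by renderKernel.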
__global__ void rayIntervalSplatKernel(HashDataStruct hashData, RayCastData rayCastData)
{
uint idx = blockIdx.x + blockIdx.y * NUM_GROUPS_X;
const HashEntry& entry = hashData.d_hashCompactified[idx];
const RayCastParams& rayCastParams = c_rayCastParams;
if (entry.ptr != FREE_ENTRY) {
float3 posWorld = hashData.virtualVoxelPosToWorld(hashData.SDFBlockToVirtualVoxelPos(entry.pos)) +
c_hashParams.m_virtualVoxelSize * 0.5f * (SDF_BLOCK_SIZE - 1.0f);
if (!rayCastData.isInCameraFrustumApprox(rayCastParams.m_viewMatrixInverse, posWorld)) return; // assumed fix: skip blocks *outside* the view frustum (mirrors the check in fillDecisionArrayKernel)
const RayCastParams ¶ms = c_rayCastParams;
const float4x4& viewMatrix = params.m_viewMatrix;
float3 worldCurrentVoxel = hashData.SDFBlockToWorld(entry.pos);
float3 MINV = worldCurrentVoxel - c_hashParams.m_virtualVoxelSize / 2.0f;
float3 maxv = MINV+SDF_BLOCK_SIZE*c_hashParams.m_virtualVoxelSize;
//float3 proj000 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, MINV.y, MINV.z));
//float3 proj100 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, MINV.y, MINV.z));
//float3 proj010 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, maxv.y, MINV.z));
//float3 proj001 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, MINV.y, maxv.z));
//float3 proj110 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, maxv.y, MINV.z));
//float3 proj011 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, maxv.y, maxv.z));
//float3 proj101 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, MINV.y, maxv.z));
//float3 proj111 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, maxv.y, maxv.z));
float3 proj000 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, MINV.y, MINV.z));
float3 proj100 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, MINV.y, MINV.z));
float3 proj010 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, maxv.y, MINV.z));
float3 proj001 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, MINV.y, maxv.z));
float3 proj110 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, maxv.y, MINV.z));
float3 proj011 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, maxv.y, maxv.z));
float3 proj101 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, MINV.y, maxv.z));
float3 proj111 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, maxv.y, maxv.z));
// Tree Reduction Min
float3 min00 = fminf(proj000, proj100);
float3 min01 = fminf(proj010, proj001);
float3 min10 = fminf(proj110, proj011);
float3 min11 = fminf(proj101, proj111);
float3 min0 = fminf(min00, min01);
float3 min1 = fminf(min10, min11);
float3 minFinal = fminf(min0, min1);
// Tree Reduction Max
float3 max00 = fmaxf(proj000, proj100);
float3 max01 = fmaxf(proj010, proj001);
float3 max10 = fmaxf(proj110, proj011);
float3 max11 = fmaxf(proj101, proj111);
float3 max0 = fmaxf(max00, max01);
float3 max1 = fmaxf(max10, max11);
float3 maxFinal = fmaxf(max0, max1);
float depth = maxFinal.z;
float * rayArray = rayCastData.d_rayIntervalSplatMaxArray;
if(params.m_splatMinimum == 1) {
depth = minFinal.z;
rayArray = rayCastData.d_rayIntervalSplatMinArray;
}
float depthWorld = RayCastData::depthProjToCameraZ(depth);
for(uint x=(uint)ceil(minFinal.x); x<maxFinal.x&&x<rayCastParams.m_width; x++) {
for(uint y=(uint)ceil(minFinal.y); y<maxFinal.y&&y<rayCastParams.m_height; y++) {
rayArray[y*rayCastParams.m_width+x] = depth;
}
}
}
}
extern "C" void rayIntervalSplatCUDA(const HashDataStruct& hashData, const RayCastData &rayCastData, const RayCastParams &rayCastParams)
{
const dim3 gridSize(NUM_GROUPS_X, (rayCastParams.m_numOccupiedSDFBlocks + NUM_GROUPS_X - 1) / NUM_GROUPS_X, 1);
const dim3 blockSize(1, 1, 1);
hipLaunchKernelGGL(( rayIntervalSplatKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, rayCastData);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
/** cube */
__device__ __host__
MarchingCubesData::MarchingCubesData() {
d_params = NULL;
d_triangles = NULL;
d_numTriangles = NULL;
m_bIsOnGPU = false;
}
/////////////////
// Device part //
/////////////////
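// extractIsoSurfaceAtPosition: classic marching cubes evaluated per voxel. The SDF (and color)
// is tri-linearly sampled at the eight corners of a voxel-sized cube around worldPos, the sign
// pattern forms the 8-bit case index into edgeTable/triTable, edge vertices are found by linear
// interpolation to the zero crossing, and the resulting triangles are appended to the global
// buffer. The threshold checks reject cubes whose samples are inconsistent or far from the surface.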
__device__
void MarchingCubesData::extractIsoSurfaceAtPosition(const float3 &worldPos, const HashDataStruct &hashData,
const RayCastData &rayCastData) {
const HashParams &hashParams = c_hashParams;
const MarchingCubesParams ¶ms = *d_params;
if (params.m_boxEnabled == 1) {
if (!isInBoxAA(params.m_minCorner, params.m_maxCorner, worldPos)) return;
}
const float isolevel = 0.0f;
const float P = hashParams.m_virtualVoxelSize / 2.0f;
const float M = -P;
float3 p000 = worldPos + make_float3(M, M, M);
float dist000;
uchar3 color000;
bool valid000 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p000, dist000, color000);
float3 p100 = worldPos + make_float3(P, M, M);
float dist100;
uchar3 color100;
bool valid100 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p100, dist100, color100);
float3 p010 = worldPos + make_float3(M, P, M);
float dist010;
uchar3 color010;
bool valid010 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p010, dist010, color010);
float3 p001 = worldPos + make_float3(M, M, P);
float dist001;
uchar3 color001;
bool valid001 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p001, dist001, color001);
float3 p110 = worldPos + make_float3(P, P, M);
float dist110;
uchar3 color110;
bool valid110 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p110, dist110, color110);
float3 p011 = worldPos + make_float3(M, P, P);
float dist011;
uchar3 color011;
bool valid011 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p011, dist011, color011);
float3 p101 = worldPos + make_float3(P, M, P);
float dist101;
uchar3 color101;
bool valid101 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p101, dist101, color101);
float3 p111 = worldPos + make_float3(P, P, P);
float dist111;
uchar3 color111;
bool valid111 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p111, dist111, color111);
if (!valid000 || !valid100 || !valid010 || !valid001 || !valid110 || !valid011 || !valid101 || !valid111) return;
uint cubeindex = 0;
if (dist010 < isolevel) cubeindex += 1;
if (dist110 < isolevel) cubeindex += 2;
if (dist100 < isolevel) cubeindex += 4;
if (dist000 < isolevel) cubeindex += 8;
if (dist011 < isolevel) cubeindex += 16;
if (dist111 < isolevel) cubeindex += 32;
if (dist101 < isolevel) cubeindex += 64;
if (dist001 < isolevel) cubeindex += 128;
const float thres = params.m_threshMarchingCubes;
float distArray[] = {dist000, dist100, dist010, dist001, dist110, dist011, dist101, dist111};
for (uint k = 0; k < 8; k++) {
for (uint l = 0; l < 8; l++) {
if (distArray[k] * distArray[l] < 0.0f) {
if (abs(distArray[k]) + abs(distArray[l]) > thres) return;
} else {
if (abs(distArray[k] - distArray[l]) > thres) return;
}
}
}
if (abs(dist000) > params.m_threshMarchingCubes2) return;
if (abs(dist100) > params.m_threshMarchingCubes2) return;
if (abs(dist010) > params.m_threshMarchingCubes2) return;
if (abs(dist001) > params.m_threshMarchingCubes2) return;
if (abs(dist110) > params.m_threshMarchingCubes2) return;
if (abs(dist011) > params.m_threshMarchingCubes2) return;
if (abs(dist101) > params.m_threshMarchingCubes2) return;
if (abs(dist111) > params.m_threshMarchingCubes2) return;
if (edgeTable[cubeindex] == 0 || edgeTable[cubeindex] == 255) return; // added by me edgeTable[cubeindex] == 255
Voxel v = hashData.getVoxel(worldPos);
Vertex vertlist[12];
if (edgeTable[cubeindex] & 1) vertlist[0] = vertexInterp(isolevel, p010, p110, dist010, dist110, v.color, v.color);
if (edgeTable[cubeindex] & 2) vertlist[1] = vertexInterp(isolevel, p110, p100, dist110, dist100, v.color, v.color);
if (edgeTable[cubeindex] & 4) vertlist[2] = vertexInterp(isolevel, p100, p000, dist100, dist000, v.color, v.color);
if (edgeTable[cubeindex] & 8) vertlist[3] = vertexInterp(isolevel, p000, p010, dist000, dist010, v.color, v.color);
if (edgeTable[cubeindex] & 16) vertlist[4] = vertexInterp(isolevel, p011, p111, dist011, dist111, v.color, v.color);
if (edgeTable[cubeindex] & 32) vertlist[5] = vertexInterp(isolevel, p111, p101, dist111, dist101, v.color, v.color);
if (edgeTable[cubeindex] & 64) vertlist[6] = vertexInterp(isolevel, p101, p001, dist101, dist001, v.color, v.color);
if (edgeTable[cubeindex] & 128)
vertlist[7] = vertexInterp(isolevel, p001, p011, dist001, dist011, v.color, v.color);
if (edgeTable[cubeindex] & 256)
vertlist[8] = vertexInterp(isolevel, p010, p011, dist010, dist011, v.color, v.color);
if (edgeTable[cubeindex] & 512)
vertlist[9] = vertexInterp(isolevel, p110, p111, dist110, dist111, v.color, v.color);
if (edgeTable[cubeindex] & 1024)
vertlist[10] = vertexInterp(isolevel, p100, p101, dist100, dist101, v.color, v.color);
if (edgeTable[cubeindex] & 2048)
vertlist[11] = vertexInterp(isolevel, p000, p001, dist000, dist001, v.color, v.color);
for (int i = 0; triTable[cubeindex][i] != -1; i += 3) {
Triangle t;
t.v0 = vertlist[triTable[cubeindex][i + 0]];
t.v1 = vertlist[triTable[cubeindex][i + 1]];
t.v2 = vertlist[triTable[cubeindex][i + 2]];
appendTriangle(t);
}
}
using Vertex = MarchingCubesData::Vertex;
using Triangle = MarchingCubesData::Triangle;
__device__
Vertex MarchingCubesData::vertexInterp(float isolevel, const float3 &p1, const float3 &p2, float d1, float d2,
const uchar4 &c1, const uchar4 &c2) const {
Vertex r1;
r1.p = p1;
r1.c = make_float3(c1.x, c1.y, c1.z) / 255.f;
Vertex r2;
r2.p = p2;
r2.c = make_float3(c2.x, c2.y, c2.z) / 255.f;
if (abs(isolevel - d1) < 0.00001f) return r1;
if (abs(isolevel - d2) < 0.00001f) return r2;
if (abs(d1 - d2) < 0.00001f) return r1;
float mu = (isolevel - d1) / (d2 - d1);
Vertex res;
res.p.x = p1.x + mu * (p2.x - p1.x); // Positions
res.p.y = p1.y + mu * (p2.y - p1.y);
res.p.z = p1.z + mu * (p2.z - p1.z);
res.c.x = (float) (c1.x + mu * (float) (c2.x - c1.x)) / 255.f; // Color
res.c.y = (float) (c1.y + mu * (float) (c2.y - c1.y)) / 255.f;
res.c.z = (float) (c1.z + mu * (float) (c2.z - c1.z)) / 255.f;
return res;
}
__device__
bool MarchingCubesData::isInBoxAA(const float3 &minCorner, const float3 &maxCorner, const float3 &pos) const {
if (pos.x < minCorner.x || pos.x > maxCorner.x) return false;
if (pos.y < minCorner.y || pos.y > maxCorner.y) return false;
if (pos.z < minCorner.z || pos.z > maxCorner.z) return false;
return true;
}
__device__
uint MarchingCubesData::append() {
uint addr = atomicAdd(d_numTriangles, 1);
//TODO check
return addr;
}
__device__
void MarchingCubesData::appendTriangle(const Triangle &t) {
if (*d_numTriangles >= d_params->m_maxNumTriangles) {
*d_numTriangles = d_params->m_maxNumTriangles;
return; // todo
}
uint addr = append();
if (addr >= d_params->m_maxNumTriangles) {
printf("marching cubes exceeded max number of triangles (addr, #tri, max#tri): (%d, %d, %d)\n", addr,
*d_numTriangles, d_params->m_maxNumTriangles);
*d_numTriangles = d_params->m_maxNumTriangles;
return; // todo
}
Triangle &triangle = d_triangles[addr];
triangle.v0 = t.v0;
triangle.v1 = t.v1;
triangle.v2 = t.v2;
return;
}
/** marching cube cuda*/
__global__ void resetMarchingCubesKernel(MarchingCubesData data) {
*data.d_numTriangles = 0;
}
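// extractIsoSurfaceKernel: launched with one block per hash entry and one thread per voxel of
// the corresponding SDF block (blockDim = sdfBlockSize^3), so every allocated voxel runs
// marching cubes on its own cell; free entries are skipped.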
__global__ void extractIsoSurfaceKernel(HashDataStruct hashData, RayCastData rayCastData, MarchingCubesData data) {
uint idx = blockIdx.x;
const HashEntry &entry = hashData.d_hash[idx];
if (entry.ptr != FREE_ENTRY) {
int3 pi_base = hashData.SDFBlockToVirtualVoxelPos(entry.pos);
int3 pi = pi_base + make_int3(threadIdx);
float3 worldPos = hashData.virtualVoxelPosToWorld(pi);
data.extractIsoSurfaceAtPosition(worldPos, hashData, rayCastData);
}
}
extern "C" void resetMarchingCubesCUDA(MarchingCubesData &data) {
const dim3 blockSize(1, 1, 1);
const dim3 gridSize(1, 1, 1);
hipLaunchKernelGGL(( resetMarchingCubesKernel), dim3(gridSize), dim3(blockSize), 0, 0, data);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
extern "C" void
extractIsoSurfaceCUDA(const HashDataStruct &hashData, const RayCastData &rayCastData, const MarchingCubesParams ¶ms,
MarchingCubesData &data) {
const dim3 gridSize(params.m_hashNumBuckets * params.m_hashBucketSize, 1, 1);
const dim3 blockSize(params.m_sdfBlockSize, params.m_sdfBlockSize, params.m_sdfBlockSize);
hipLaunchKernelGGL(( extractIsoSurfaceKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, rayCastData, data);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
void CUDAMarchingCubesHashSDF::create(const MarchingCubesParams ¶ms) {
m_params = params;
m_data.allocate(m_params);
resetMarchingCubesCUDA(m_data);
}
void CUDAMarchingCubesHashSDF::extractIsoSurface(const HashDataStruct &hashData, const HashParams &hashParams,
const RayCastData &rayCastData, const vec3f &minCorner,
const vec3f &maxCorner, bool boxEnabled) {
resetMarchingCubesCUDA(m_data);
m_params.m_maxCorner = maxCorner;
m_params.m_minCorner = minCorner;
m_params.m_boxEnabled = boxEnabled;
m_data.updateParams(m_params);
extractIsoSurfaceCUDA(hashData, rayCastData, m_params, m_data);
copyTrianglesToCPU();
}
Mesh * CUDAMarchingCubesHashSDF::getMeshData() {
return m_meshData;
}
|
b6804e620b0aabb76546e0e96f8ae0847f69d535.cu
|
/**
* this file is based on https://github.com/niessner/BundleFusion.git
*/
#include "cuda_scene_rep.h"
#include <cuda_texture_types.h>
#include <pcl/Vertices.h>
#include <pcl/conversions.h>
#include "tables.h"
///////////////
// Host part //
///////////////
__device__ __host__
HashDataStruct::HashDataStruct() {
d_heap = NULL;
d_heapCounter = NULL;
d_hash = NULL;
d_hashDecision = NULL;
d_hashDecisionPrefix = NULL;
d_hashCompactified = NULL;
d_hashCompactifiedCounter = NULL;
d_SDFBlocks = NULL;
d_hashBucketMutex = NULL;
m_bIsOnGPU = false;
}
__host__
void HashDataStruct::allocate(const HashParams ¶ms, bool dataOnGPU) {
m_bIsOnGPU = dataOnGPU;
if (m_bIsOnGPU) {
cutilSafeCall(cudaMalloc(&d_heap, sizeof(unsigned int) * params.m_numSDFBlocks));
cutilSafeCall(cudaMalloc(&d_heapCounter, sizeof(unsigned int)));
cutilSafeCall(cudaMalloc(&d_hash, sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(cudaMalloc(&d_hashDecision, sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(
cudaMalloc(&d_hashDecisionPrefix, sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(
cudaMalloc(&d_hashCompactified, sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize));
cutilSafeCall(cudaMalloc(&d_hashCompactifiedCounter, sizeof(int)));
cutilSafeCall(cudaMalloc(&d_SDFBlocks,
sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize * params.m_SDFBlockSize *
params.m_SDFBlockSize));
cutilSafeCall(cudaMalloc(&d_hashBucketMutex, sizeof(int) * params.m_hashNumBuckets));
} else {
d_heap = new unsigned int[params.m_numSDFBlocks];
d_heapCounter = new unsigned int[1];
d_hash = new HashEntry[params.m_hashNumBuckets * params.m_hashBucketSize];
d_hashDecision = new int[params.m_hashNumBuckets * params.m_hashBucketSize];
d_hashDecisionPrefix = new int[params.m_hashNumBuckets * params.m_hashBucketSize];
d_hashCompactifiedCounter = new int[1];
d_hashCompactified = new HashEntry[params.m_hashNumBuckets * params.m_hashBucketSize];
d_SDFBlocks = new Voxel[params.m_numSDFBlocks * params.m_SDFBlockSize * params.m_SDFBlockSize *
params.m_SDFBlockSize];
d_hashBucketMutex = new int[params.m_hashNumBuckets];
}
updateParams(params);
}
extern "C" void updateConstantHashParams(const HashParams& params) {
size_t size;
CUDA_CHECKED_CALL(cudaGetSymbolSize(&size, c_hashParams));
CUDA_CHECKED_CALL(cudaMemcpyToSymbol(c_hashParams, ¶ms, size, 0, cudaMemcpyHostToDevice));
CUDA_CHECKED_CALL(cudaDeviceSynchronize());
}
__host__
void HashDataStruct::updateParams(const HashParams ¶ms) {
if (m_bIsOnGPU) {
updateConstantHashParams(params);
}
}
__host__
void HashDataStruct::free() {
if (m_bIsOnGPU) {
cutilSafeCall(cudaFree(d_heap));
cutilSafeCall(cudaFree(d_heapCounter));
cutilSafeCall(cudaFree(d_hash));
cutilSafeCall(cudaFree(d_hashDecision));
cutilSafeCall(cudaFree(d_hashDecisionPrefix));
cutilSafeCall(cudaFree(d_hashCompactified));
cutilSafeCall(cudaFree(d_hashCompactifiedCounter));
cutilSafeCall(cudaFree(d_SDFBlocks));
cutilSafeCall(cudaFree(d_hashBucketMutex));
} else {
if (d_heap) delete[] d_heap;
if (d_heapCounter) delete[] d_heapCounter;
if (d_hash) delete[] d_hash;
if (d_hashDecision) delete[] d_hashDecision;
if (d_hashDecisionPrefix) delete[] d_hashDecisionPrefix;
if (d_hashCompactified) delete[] d_hashCompactified;
if (d_hashCompactifiedCounter) delete[] d_hashCompactifiedCounter;
if (d_SDFBlocks) delete[] d_SDFBlocks;
if (d_hashBucketMutex) delete[] d_hashBucketMutex;
}
d_hash = NULL;
d_heap = NULL;
d_heapCounter = NULL;
d_hashDecision = NULL;
d_hashDecisionPrefix = NULL;
d_hashCompactified = NULL;
d_hashCompactifiedCounter = NULL;
d_SDFBlocks = NULL;
d_hashBucketMutex = NULL;
}
__host__
HashDataStruct HashDataStruct::copyToCPU() const {
HashParams params;
HashDataStruct hashData;
hashData.allocate(params, false); //allocate the data on the CPU
cutilSafeCall(
cudaMemcpy(hashData.d_heap, d_heap, sizeof(unsigned int) * params.m_numSDFBlocks, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(hashData.d_heapCounter, d_heapCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost));
cutilSafeCall(
cudaMemcpy(hashData.d_hash, d_hash, sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize,
cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(hashData.d_hashDecision, d_hashDecision,
sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(hashData.d_hashDecisionPrefix, d_hashDecisionPrefix,
sizeof(int) * params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(hashData.d_hashCompactified, d_hashCompactified,
sizeof(HashEntry) * params.m_hashNumBuckets * params.m_hashBucketSize,
cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(hashData.d_SDFBlocks, d_SDFBlocks,
sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize * params.m_SDFBlockSize *
params.m_SDFBlockSize, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(hashData.d_hashBucketMutex, d_hashBucketMutex, sizeof(int) * params.m_hashNumBuckets,
cudaMemcpyDeviceToHost));
return hashData; //TODO MATTHIAS look at this (i.e., when does memory get destroyed; if it happens in the destructor it would kill everything here)
}
/////////////////
// Device part //
/////////////////
__device__
const HashParams &HashDataStruct::params() const {
return c_hashParams;
}
//! see Teschner et al. (but with correct prime values)
__device__
uint HashDataStruct::computeHashPos(const int3 &virtualVoxelPos) const {
const int p0 = 73856093;
const int p1 = 19349669;
const int p2 = 83492791;
int res = ((virtualVoxelPos.x * p0) ^ (virtualVoxelPos.y * p1) ^ (virtualVoxelPos.z * p2)) %
c_hashParams.m_hashNumBuckets;
if (res < 0) res += c_hashParams.m_hashNumBuckets;
return (uint) res;
}
//merges two voxels (v0 is the input voxel, v1 the currently stored voxel)
__device__
void HashDataStruct::combineVoxel(const Voxel &v0, const Voxel &v1, Voxel &out) const {
//v.color = (10*v0.weight * v0.color + v1.weight * v1.color)/(10*v0.weight + v1.weight); //give the currently observed color more weight
//v.color = (v0.weight * v0.color + v1.weight * v1.color)/(v0.weight + v1.weight);
//out.color = 0.5f * (v0.color + v1.color); //exponential running average
float3 c0 = make_float3(v0.color.x, v0.color.y, v0.color.z);
float3 c1 = make_float3(v1.color.x, v1.color.y, v1.color.z);
//float3 res = (c0+c1)/2;
//float3 res = (c0 * (float)v0.weight + c1 * (float)v1.weight) / ((float)v0.weight + (float)v1.weight);
//float3 res = c1;
if (v0.weight == 0) out.color = v1.color;
else {
float3 res = 0.5f * c0 + 0.5f * c1;
out.color.x = (uchar)(res.x + 0.5f);
out.color.y = (uchar)(res.y + 0.5f);
out.color.z = (uchar)(res.z + 0.5f);
}
out.sdf = (v0.sdf * (float) v0.weight + v1.sdf * (float) v1.weight) / ((float) v0.weight + (float) v1.weight);
//out.weight = min(c_hashParams.m_integrationWeightMax, (unsigned int)v0.weight + (unsigned int)v1.weight);
out.weight = min((float) c_hashParams.m_integrationWeightMax, v0.weight + v1.weight);
}
__device__
void HashDataStruct::combineVoxelDepthOnly(const Voxel &v0, const Voxel &v1, Voxel &out) const {
out.sdf = (v0.sdf * (float) v0.weight + v1.sdf * (float) v1.weight) / ((float) v0.weight + (float) v1.weight);
out.weight = min((float) c_hashParams.m_integrationWeightMax, v0.weight + v1.weight);
}
//! returns the truncation of the SDF for a given distance value
__device__
float HashDataStruct::getTruncation(float z) const {
return c_hashParams.m_truncation + c_hashParams.m_truncScale * z;
}
__device__
float3 HashDataStruct::worldToVirtualVoxelPosFloat(const float3 &pos) const {
return pos / c_hashParams.m_virtualVoxelSize;
}
__device__
int3 HashDataStruct::worldToVirtualVoxelPos(const float3 &pos) const {
//const float3 p = pos*g_VirtualVoxelResolutionScalar;
const float3 p = pos / c_hashParams.m_virtualVoxelSize;
return make_int3(p + make_float3(sign(p)) * 0.5f);
}
__device__
int3 HashDataStruct::virtualVoxelPosToSDFBlock(int3 virtualVoxelPos) const {
if (virtualVoxelPos.x < 0) virtualVoxelPos.x -= SDF_BLOCK_SIZE - 1;
if (virtualVoxelPos.y < 0) virtualVoxelPos.y -= SDF_BLOCK_SIZE - 1;
if (virtualVoxelPos.z < 0) virtualVoxelPos.z -= SDF_BLOCK_SIZE - 1;
return make_int3(
virtualVoxelPos.x / SDF_BLOCK_SIZE,
virtualVoxelPos.y / SDF_BLOCK_SIZE,
virtualVoxelPos.z / SDF_BLOCK_SIZE);
}
// Computes virtual voxel position of corner sample position
__device__
int3 HashDataStruct::SDFBlockToVirtualVoxelPos(const int3 &sdfBlock) const {
return sdfBlock * SDF_BLOCK_SIZE;
}
__device__
float3 HashDataStruct::virtualVoxelPosToWorld(const int3 &pos) const {
return make_float3(pos) * c_hashParams.m_virtualVoxelSize;
}
__device__
float3 HashDataStruct::SDFBlockToWorld(const int3 &sdfBlock) const {
return virtualVoxelPosToWorld(SDFBlockToVirtualVoxelPos(sdfBlock));
}
__device__
int3 HashDataStruct::worldToSDFBlock(const float3 &worldPos) const {
return virtualVoxelPosToSDFBlock(worldToVirtualVoxelPos(worldPos));
}
__device__
bool HashDataStruct::isSDFBlockInCameraFrustumApprox(const int3 &sdfBlock, CUDAFrame &frame) {
float3 posWorld = virtualVoxelPosToWorld(SDFBlockToVirtualVoxelPos(sdfBlock)) +
c_hashParams.m_virtualVoxelSize * 0.5f * (SDF_BLOCK_SIZE - 1.0f);
return frame.isInCameraFrustumApprox(posWorld);
}
//! computes the (local) virtual voxel pos of an index; idx in [0;511]
__device__
uint3 HashDataStruct::delinearizeVoxelIndex(uint idx) const {
uint x = idx % SDF_BLOCK_SIZE;
uint y = (idx % (SDF_BLOCK_SIZE * SDF_BLOCK_SIZE)) / SDF_BLOCK_SIZE;
uint z = idx / (SDF_BLOCK_SIZE * SDF_BLOCK_SIZE);
return make_uint3(x, y, z);
}
//! computes the linearized index of a local virtual voxel pos; pos in [0;7]^3
__device__
uint HashDataStruct::linearizeVoxelPos(const int3 &virtualVoxelPos) const {
return
virtualVoxelPos.z * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE +
virtualVoxelPos.y * SDF_BLOCK_SIZE +
virtualVoxelPos.x;
}
__device__
int HashDataStruct::virtualVoxelPosToLocalSDFBlockIndex(const int3 &virtualVoxelPos) const {
int3 localVoxelPos = make_int3(
virtualVoxelPos.x % SDF_BLOCK_SIZE,
virtualVoxelPos.y % SDF_BLOCK_SIZE,
virtualVoxelPos.z % SDF_BLOCK_SIZE);
if (localVoxelPos.x < 0) localVoxelPos.x += SDF_BLOCK_SIZE;
if (localVoxelPos.y < 0) localVoxelPos.y += SDF_BLOCK_SIZE;
if (localVoxelPos.z < 0) localVoxelPos.z += SDF_BLOCK_SIZE;
return linearizeVoxelPos(localVoxelPos);
}
__device__
int HashDataStruct::worldToLocalSDFBlockIndex(const float3 &world) const {
int3 virtualVoxelPos = worldToVirtualVoxelPos(world);
return virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos);
}
//! returns the hash entry for a given worldPos; if there was no hash entry the returned entry will have a ptr with FREE_ENTRY set
__device__
HashEntry HashDataStruct::getHashEntry(const float3 &worldPos) const {
//int3 blockID = worldToSDFVirtualVoxelPos(worldPos)/SDF_BLOCK_SIZE; //position of sdf block
int3 blockID = worldToSDFBlock(worldPos);
return getHashEntryForSDFBlockPos(blockID);
}
__device__
void HashDataStruct::deleteHashEntry(uint id) {
deleteHashEntry(d_hash[id]);
}
__device__
void HashDataStruct::deleteHashEntry(HashEntry &hashEntry) {
hashEntry.pos = make_int3(0);
hashEntry.offset = 0;
hashEntry.ptr = FREE_ENTRY;
}
__device__
bool HashDataStruct::voxelExists(const float3 &worldPos) const {
HashEntry hashEntry = getHashEntry(worldPos);
return (hashEntry.ptr != FREE_ENTRY);
}
__device__
void HashDataStruct::deleteVoxel(Voxel &v) const {
v.color = make_uchar4(0, 0, 0, 0);
v.weight = 0.0f;
v.sdf = 0.0f;
}
__device__
void HashDataStruct::deleteVoxel(uint id) {
deleteVoxel(d_SDFBlocks[id]);
}
__device__
Voxel HashDataStruct::getVoxel(const float3 &worldPos) const {
HashEntry hashEntry = getHashEntry(worldPos);
Voxel v;
if (hashEntry.ptr == FREE_ENTRY) {
deleteVoxel(v);
} else {
int3 virtualVoxelPos = worldToVirtualVoxelPos(worldPos);
v = d_SDFBlocks[hashEntry.ptr + virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos)];
}
return v;
}
__device__
Voxel HashDataStruct::getVoxel(const int3 &virtualVoxelPos) const {
HashEntry hashEntry = getHashEntryForSDFBlockPos(virtualVoxelPosToSDFBlock(virtualVoxelPos));
Voxel v;
if (hashEntry.ptr == FREE_ENTRY) {
deleteVoxel(v);
} else {
v = d_SDFBlocks[hashEntry.ptr + virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos)];
}
return v;
}
__device__
void HashDataStruct::setVoxel(const int3 &virtualVoxelPos, Voxel &voxelInput) const {
HashEntry hashEntry = getHashEntryForSDFBlockPos(virtualVoxelPosToSDFBlock(virtualVoxelPos));
if (hashEntry.ptr != FREE_ENTRY) {
d_SDFBlocks[hashEntry.ptr + virtualVoxelPosToLocalSDFBlockIndex(virtualVoxelPos)] = voxelInput;
}
}
//! returns the hash entry for a given sdf block id; if there was no hash entry the returned entry will have a ptr with FREE_ENTRY set
__device__
HashEntry HashDataStruct::getHashEntryForSDFBlockPos(const int3 &sdfBlock) const {
uint h = computeHashPos(sdfBlock); //hash bucket
uint hp = h * HASH_BUCKET_SIZE; //hash position
HashEntry entry;
entry.pos = sdfBlock;
entry.offset = 0;
entry.ptr = FREE_ENTRY;
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
HashEntry curr = d_hash[i];
if (curr.pos.x == entry.pos.x && curr.pos.y == entry.pos.y && curr.pos.z == entry.pos.z &&
curr.ptr != FREE_ENTRY) {
return curr;
}
}
#ifdef HANDLE_COLLISIONS
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1;
int i = idxLastEntryInBucket; //start with the last entry of the current bucket
HashEntry curr;
//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
curr = d_hash[i];
if (curr.pos.x == entry.pos.x && curr.pos.y == entry.pos.y && curr.pos.z == entry.pos.z &&
curr.ptr != FREE_ENTRY) {
return curr;
}
if (curr.offset == 0) { //we have found the end of the list
break;
}
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
#endif
return entry;
}
//for histogram (no collision traversal)
__device__
unsigned int HashDataStruct::getNumHashEntriesPerBucket(unsigned int bucketID) {
unsigned int h = 0;
for (uint i = 0; i < HASH_BUCKET_SIZE; i++) {
if (d_hash[bucketID * HASH_BUCKET_SIZE + i].ptr != FREE_ENTRY) {
h++;
}
}
return h;
}
//for histogram (collisions traversal only)
__device__
unsigned int HashDataStruct::getNumHashLinkedList(unsigned int bucketID) {
unsigned int listLen = 0;
#ifdef HANDLE_COLLISIONS
const uint idxLastEntryInBucket = (bucketID + 1) * HASH_BUCKET_SIZE - 1;
unsigned int i = idxLastEntryInBucket; //start with the last entry of the current bucket
//int offset = 0;
HashEntry curr;
curr.offset = 0;
//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
//offset = curr.offset;
//curr = getHashEntry(g_Hash, i);
curr = d_hash[i];
if (curr.offset == 0) { //we have found the end of the list
break;
}
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
listLen++;
maxIter++;
}
#endif
return listLen;
}
__device__
uint HashDataStruct::consumeHeap() {
uint addr = atomicSub(&d_heapCounter[0], 1);
//TODO MATTHIAS check some error handling?
return d_heap[addr];
}
__device__
void HashDataStruct::appendHeap(uint ptr) {
uint addr = atomicAdd(&d_heapCounter[0], 1);
//TODO MATTHIAS check some error handling?
d_heap[addr + 1] = ptr;
}
//pos in SDF block coordinates
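// allocBlock first probes the target bucket linearly; if the block is already allocated it
// returns, otherwise it remembers the first free slot. With HANDLE_COLLISIONS it additionally
// walks the bucket's overflow linked list. Insertion is serialized with a per-bucket mutex
// (atomicExch on d_hashBucketMutex); if the bucket is already locked by another thread the
// allocation is skipped and will be attempted again on a later alloc pass.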
__device__
void HashDataStruct::allocBlock(const int3 &pos) {
uint h = computeHashPos(pos); //hash bucket
uint hp = h * HASH_BUCKET_SIZE; //hash position
int firstEmpty = -1;
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
const HashEntry &curr = d_hash[i];
//in that case the SDF-block is already allocated and corresponds to the current position -> exit thread
if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
return;
}
//store the first FREE_ENTRY hash entry
if (firstEmpty == -1 && curr.ptr == FREE_ENTRY) {
firstEmpty = i;
}
}
#ifdef HANDLE_COLLISIONS
//updated variables as after the loop
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1; //get last index of bucket
uint i = idxLastEntryInBucket; //start with the last entry of the current bucket
//int offset = 0;
HashEntry curr;
curr.offset = 0;
//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
//int k = 0;
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
//offset = curr.offset;
curr = d_hash[i]; //TODO MATTHIAS do by reference
if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
return;
}
if (curr.offset == 0) { //we have found the end of the list
break;
}
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
#endif
if (firstEmpty != -1) { //if there is an empty entry and we haven't allocated the current entry before
//int prevValue = 0;
//InterlockedExchange(d_hashBucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue != LOCK_ENTRY) { //only proceed if the bucket has been locked
HashEntry &entry = d_hash[firstEmpty];
entry.pos = pos;
entry.offset = NO_OFFSET;
long index = consumeHeap();
entry.ptr = index * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE; //memory alloc
}
return;
}
#ifdef HANDLE_COLLISIONS
//if (i != idxLastEntryInBucket) return;
int offset = 0;
//linear search for free entry
maxIter = 0;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
offset++;
i = (idxLastEntryInBucket + offset) %
(HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //go to next hash element
if ((offset % HASH_BUCKET_SIZE) == 0)
continue; //cannot insert into a last bucket element (would conflict with other linked lists)
curr = d_hash[i];
//if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
// return;
//}
if (curr.ptr == FREE_ENTRY) { //this is the first free entry
//int prevValue = 0;
//InterlockedExchange(g_HashBucketMutex[h], LOCK_ENTRY, prevValue); //lock the original hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue != LOCK_ENTRY) {
HashEntry lastEntryInBucket = d_hash[idxLastEntryInBucket];
h = i / HASH_BUCKET_SIZE;
//InterlockedExchange(g_HashBucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket where we have found a free entry
prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue != LOCK_ENTRY) { //only proceed if the bucket has been locked
HashEntry &entry = d_hash[i];
entry.pos = pos;
entry.offset = lastEntryInBucket.offset;
long index = consumeHeap();
entry.ptr = index * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE; //memory alloc
lastEntryInBucket.offset = offset;
d_hash[idxLastEntryInBucket] = lastEntryInBucket;
//setHashEntry(g_Hash, idxLastEntryInBucket, lastEntryInBucket);
}
}
return; //bucket was already locked
}
maxIter++;
}
#endif
}
//! inserts a hash entry without allocating any memory: used by streaming; TODO MATTHIAS check the atomics in this function
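// The fast path below claims a free slot in the target bucket lock-free via atomicCAS on the
// entry's ptr field (FREE_ENTRY -> LOCK_ENTRY); with HANDLE_COLLISIONS a free slot elsewhere is
// linked into the bucket's overflow list by atomically patching the offset stored in the
// bucket's last entry.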
__device__
bool HashDataStruct::insertHashEntry(HashEntry entry) {
uint h = computeHashPos(entry.pos);
uint hp = h * HASH_BUCKET_SIZE;
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
//const HashEntry& curr = d_hash[i];
int prevWeight = 0;
//InterlockedCompareExchange(hash[3*i+2], FREE_ENTRY, LOCK_ENTRY, prevWeight);
prevWeight = atomicCAS(&d_hash[i].ptr, FREE_ENTRY, LOCK_ENTRY);
if (prevWeight == FREE_ENTRY) {
d_hash[i] = entry;
//setHashEntry(hash, i, entry);
return true;
}
}
#ifdef HANDLE_COLLISIONS
//updated variables as after the loop
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1; //get last index of bucket
uint i = idxLastEntryInBucket; //start with the last entry of the current bucket
HashEntry curr;
unsigned int maxIter = 0;
//[allow_uav_condition]
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) { //traverse list until end (why find the end? we are inserting at the start)
//curr = getHashEntry(hash, i);
curr = d_hash[i]; //TODO MATTHIAS do by reference
if (curr.offset == 0) break; //we have found the end of the list
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
maxIter = 0;
int offset = 0;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) { //linear search for free entry
offset++;
uint i = (idxLastEntryInBucket + offset) %
(HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //go to next hash element
if ((offset % HASH_BUCKET_SIZE) == 0)
continue; //cannot insert into a last bucket element (would conflict with other linked lists)
int prevWeight = 0;
//InterlockedCompareExchange(hash[3*i+2], FREE_ENTRY, LOCK_ENTRY, prevWeight); //check for a free entry
uint *d_hashUI = (uint *) d_hash;
prevWeight = atomicCAS(&d_hashUI[3 * idxLastEntryInBucket + 1], (uint) FREE_ENTRY, (uint) LOCK_ENTRY);
if (prevWeight ==
FREE_ENTRY) { //if free entry found set prev->next = curr & curr->next = prev->next
//[allow_uav_condition]
//while(hash[3*idxLastEntryInBucket+2] == LOCK_ENTRY); // expects setHashEntry to set the ptr last, required because pos.z is packed into the same value -> prev->next = curr -> might corrupt pos.z
HashEntry lastEntryInBucket = d_hash[idxLastEntryInBucket]; //get prev (= lastEntry in Bucket)
int newOffsetPrev =
(offset << 16) | (lastEntryInBucket.pos.z & 0x0000ffff); //prev->next = curr (maintain old z-pos)
int oldOffsetPrev = 0;
//InterlockedExchange(hash[3*idxLastEntryInBucket+1], newOffsetPrev, oldOffsetPrev); //set prev offset atomically
oldOffsetPrev = atomicExch(&d_hashUI[3 * idxLastEntryInBucket + 1], newOffsetPrev); // d_hashUI already aliases d_hash above
entry.offset = oldOffsetPrev
>> 16; //remove prev z-pos from old offset
//setHashEntry(hash, i, entry); //sets the current hashEntry with: curr->next = prev->next
d_hash[i] = entry;
return true;
}
maxIter++;
}
#endif
return false;
}
//! deletes a hash entry position for a given sdfBlock index (returns true upon successful deletion; otherwise returns false)
__device__
bool HashDataStruct::deleteHashEntryElement(const int3 &sdfBlock) {
uint h = computeHashPos(sdfBlock); //hash bucket
uint hp = h * HASH_BUCKET_SIZE; //hash position
for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
uint i = j + hp;
const HashEntry &curr = d_hash[i];
if (curr.pos.x == sdfBlock.x && curr.pos.y == sdfBlock.y && curr.pos.z == sdfBlock.z &&
curr.ptr != FREE_ENTRY) {
#ifndef HANDLE_COLLISIONS
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
deleteHashEntry(i);
return true;
#endif
#ifdef HANDLE_COLLISIONS
if (curr.offset != 0) { //if there was a pointer set it to the next list element
//int prevValue = 0;
//InterlockedExchange(bucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue == LOCK_ENTRY) return false;
if (prevValue != LOCK_ENTRY) {
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
int nextIdx = (i + curr.offset) % (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets);
//setHashEntry(hash, i, getHashEntry(hash, nextIdx));
d_hash[i] = d_hash[nextIdx];
deleteHashEntry(nextIdx);
return true;
}
} else {
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
deleteHashEntry(i);
return true;
}
#endif //HANDLE_COLLISIONS
}
}
#ifdef HANDLE_COLLISIONS
const uint idxLastEntryInBucket = (h + 1) * HASH_BUCKET_SIZE - 1;
int i = idxLastEntryInBucket;
HashEntry curr;
curr = d_hash[i];
int prevIdx = i;
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
unsigned int maxIter = 0;
uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
#pragma unroll 1
while (maxIter < g_MaxLoopIterCount) {
curr = d_hash[i];
//found the entry that we want to delete
if (curr.pos.x == sdfBlock.x && curr.pos.y == sdfBlock.y && curr.pos.z == sdfBlock.z &&
curr.ptr != FREE_ENTRY) {
//int prevValue = 0;
//InterlockedExchange(bucketMutex[h], LOCK_ENTRY, prevValue); //lock the hash bucket
int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
if (prevValue == LOCK_ENTRY) return false;
if (prevValue != LOCK_ENTRY) {
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
appendHeap(curr.ptr / linBlockSize);
//heapAppend.Append(curr.ptr / linBlockSize);
deleteHashEntry(i);
HashEntry prev = d_hash[prevIdx];
prev.offset = curr.offset;
//setHashEntry(hash, prevIdx, prev);
d_hash[prevIdx] = prev;
return true;
}
}
if (curr.offset == 0) { //we have found the end of the list
return false; //should never happen, because the entry must have been found earlier in the list
}
prevIdx = i;
i = idxLastEntryInBucket + curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * c_hashParams.m_hashNumBuckets); //check for overflow
maxIter++;
}
#endif // HANDLE_COLLISIONS
return false;
}
#define T_PER_BLOCK 16
texture<float, cudaTextureType2D, cudaReadModeElementType> depthTextureRef;
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> colorTextureRef;
void bindInputDepthColorTextures(const CUDAFrame& frame)
{
int width = frame.imageWidth, height = frame.imageHeight;
cutilSafeCall(cudaBindTexture2D(0, &depthTextureRef, frame.depthData, &depthTextureRef.channelDesc, width, height, sizeof(float)*width));
cutilSafeCall(cudaBindTexture2D(0, &colorTextureRef, frame.colorData, &colorTextureRef.channelDesc, width, height, sizeof(uchar4)*width));
depthTextureRef.filterMode = cudaFilterModePoint;
colorTextureRef.filterMode = cudaFilterModePoint;
}
__global__ void resetHeapKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx == 0) {
hashData.d_heapCounter[0] = hashParams.m_numSDFBlocks - 1; //points to the last element of the array
}
if (idx < hashParams.m_numSDFBlocks) {
hashData.d_heap[idx] = hashParams.m_numSDFBlocks - idx - 1;
uint blockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
uint base_idx = idx * blockSize;
for (uint i = 0; i < blockSize; i++) {
hashData.deleteVoxel(base_idx+i);
}
}
}
__global__ void resetHashKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
hashData.deleteHashEntry(hashData.d_hash[idx]);
hashData.deleteHashEntry(hashData.d_hashCompactified[idx]);
}
}
__global__ void resetHashBucketMutexKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets) {
hashData.d_hashBucketMutex[idx] = FREE_ENTRY;
}
}
void resetCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
{
//resetting the heap and SDF blocks
const dim3 gridSize((hashParams.m_numSDFBlocks + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
resetHeapKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
{
//resetting the hash
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
resetHashKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
{
//resetting the mutex
const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
resetHashBucketMutexKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
}
void resetHashBucketMutexCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
resetHashBucketMutexKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
__device__
unsigned int linearizeChunkPos(const int3& chunkPos)
{
int3 p = chunkPos-c_hashParams.m_streamingMinGridPos;
return p.z * c_hashParams.m_streamingGridDimensions.x * c_hashParams.m_streamingGridDimensions.y +
p.y * c_hashParams.m_streamingGridDimensions.x +
p.x;
}
__device__
int3 worldToChunks(const float3& posWorld)
{
float3 p;
p.x = posWorld.x/c_hashParams.m_streamingVoxelExtents.x;
p.y = posWorld.y/c_hashParams.m_streamingVoxelExtents.y;
p.z = posWorld.z/c_hashParams.m_streamingVoxelExtents.z;
float3 s;
s.x = (float)sign(p.x);
s.y = (float)sign(p.y);
s.z = (float)sign(p.z);
return make_int3(p+s*0.5f);
}
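// allocKernel: for every valid depth pixel the segment [d - truncation, d + truncation] along
// the viewing ray is transformed into world space and traversed with a 3D DDA (step/tMax/tDelta)
// over SDF-block coordinates; every visited block that lies inside the camera frustum is
// allocated in the hash.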
__global__ void allocKernel(HashDataStruct hashData, CUDAFrame frame)
{
const HashParams& hashParams = c_hashParams;
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < frame.imageWidth && y < frame.imageHeight)
{
float d = tex2D(depthTextureRef, x, y);
if (d == MINF || d == 0.0f) return;
if (d >= hashParams.m_maxIntegrationDistance) return;
float t = hashData.getTruncation(d);
float minDepth = min(hashParams.m_maxIntegrationDistance, d-t);
float maxDepth = min(hashParams.m_maxIntegrationDistance, d+t);
if (minDepth >= maxDepth) return;
float3 rayMin = frame.unProject(x, y, minDepth);
rayMin = hashParams.m_rigidTransform * rayMin;
float3 rayMax = frame.unProject(x, y, maxDepth);
rayMax = hashParams.m_rigidTransform * rayMax;
float3 rayDir = normalize(rayMax - rayMin);
int3 idCurrentVoxel = hashData.worldToSDFBlock(rayMin);
int3 idEnd = hashData.worldToSDFBlock(rayMax);
float3 step = make_float3(sign(rayDir));
float3 boundaryPos = hashData.SDFBlockToWorld(idCurrentVoxel+make_int3(clamp(step, 0.0f, 1.0f)))-0.5f*hashParams.m_virtualVoxelSize;
float3 tMax = (boundaryPos-rayMin)/rayDir;
float3 tDelta = (step*SDF_BLOCK_SIZE*hashParams.m_virtualVoxelSize)/rayDir;
int3 idBound = make_int3(make_float3(idEnd)+step);
//#pragma unroll
//for(int c = 0; c < 3; c++) {
// if (rayDir[c] == 0.0f) { tMax[c] = PINF; tDelta[c] = PINF; }
// if (boundaryPos[c] - rayMin[c] == 0.0f) { tMax[c] = PINF; tDelta[c] = PINF; }
//}
if (rayDir.x == 0.0f) { tMax.x = PINF; tDelta.x = PINF; }
if (boundaryPos.x - rayMin.x == 0.0f) { tMax.x = PINF; tDelta.x = PINF; }
if (rayDir.y == 0.0f) { tMax.y = PINF; tDelta.y = PINF; }
if (boundaryPos.y - rayMin.y == 0.0f) { tMax.y = PINF; tDelta.y = PINF; }
if (rayDir.z == 0.0f) { tMax.z = PINF; tDelta.z = PINF; }
if (boundaryPos.z - rayMin.z == 0.0f) { tMax.z = PINF; tDelta.z = PINF; }
unsigned int iter = 0; // iter < g_MaxLoopIterCount
unsigned int g_MaxLoopIterCount = 1024; //TODO MATTHIAS MOVE TO GLOBAL APP STATE
#pragma unroll 1
while(iter < g_MaxLoopIterCount) {
//check if it's in the frustum and not checked out
if (hashData.isSDFBlockInCameraFrustumApprox(idCurrentVoxel, frame)) {
hashData.allocBlock(idCurrentVoxel);
}
// Traverse voxel grid
if(tMax.x < tMax.y && tMax.x < tMax.z) {
idCurrentVoxel.x += step.x;
if(idCurrentVoxel.x == idBound.x) return;
tMax.x += tDelta.x;
}
else if(tMax.z < tMax.y) {
idCurrentVoxel.z += step.z;
if(idCurrentVoxel.z == idBound.z) return;
tMax.z += tDelta.z;
}
else {
idCurrentVoxel.y += step.y;
if(idCurrentVoxel.y == idBound.y) return;
tMax.y += tDelta.y;
}
iter++;
}
}
}
void allocCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const dim3 gridSize((frame.imageWidth + T_PER_BLOCK - 1)/T_PER_BLOCK, (frame.imageHeight + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
allocKernel<<<gridSize, blockSize>>>(hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
__global__ void fillDecisionArrayKernel(HashDataStruct hashData, CUDAFrame frame)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
hashData.d_hashDecision[idx] = 0;
if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
if (hashData.isSDFBlockInCameraFrustumApprox(hashData.d_hash[idx].pos, frame))
{
hashData.d_hashDecision[idx] = 1; //yes
}
}
}
}
void fillDecisionArrayCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
fillDecisionArrayKernel<<<gridSize, blockSize>>>(hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
__global__ void compactifyHashKernel(HashDataStruct hashData)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
if (hashData.d_hashDecision[idx] == 1) {
hashData.d_hashCompactified[hashData.d_hashDecisionPrefix[idx]-1] = hashData.d_hash[idx];
}
}
}
void compactifyHashCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
compactifyHashKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
#define COMPACTIFY_HASH_THREADS_PER_BLOCK 256
//#define COMPACTIFY_HASH_SIMPLE
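// compactifyHashAllInOneKernel combines the decision, prefix-sum and copy passes: each thread
// tests one hash entry against the frustum, visible entries reserve a slot in a block-local
// shared counter, thread 0 then reserves a contiguous range in the global counter with a single
// atomicAdd, and every visible entry is written to d_hashCompactified at global base + local offset.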
__global__ void compactifyHashAllInOneKernel(HashDataStruct hashData, CUDAFrame frame)
{
const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int localCounter;
if (threadIdx.x == 0) localCounter = 0;
__syncthreads();
int addrLocal = -1;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) {
if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
if (hashData.isSDFBlockInCameraFrustumApprox(hashData.d_hash[idx].pos, frame))
{
addrLocal = atomicAdd(&localCounter, 1);
}
}
}
__syncthreads();
__shared__ int addrGlobal;
if (threadIdx.x == 0 && localCounter > 0) {
addrGlobal = atomicAdd(hashData.d_hashCompactifiedCounter, localCounter);
}
__syncthreads();
if (addrLocal != -1) {
const unsigned int addr = addrGlobal + addrLocal;
hashData.d_hashCompactified[addr] = hashData.d_hash[idx];
}
}
unsigned int compactifyHashAllInOneCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame &frame)
{
const unsigned int threadsPerBlock = COMPACTIFY_HASH_THREADS_PER_BLOCK;
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + threadsPerBlock - 1) / threadsPerBlock, 1);
const dim3 blockSize(threadsPerBlock, 1);
cutilSafeCall(cudaMemset(hashData.d_hashCompactifiedCounter, 0, sizeof(int)));
compactifyHashAllInOneKernel<<<gridSize, blockSize>>>(hashData, frame);
unsigned int res = 0;
cutilSafeCall(cudaMemcpy(&res, hashData.d_hashCompactifiedCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_CHECKED_NO_ERROR();
return res;
}
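// integrateDepthMapKernel<deIntegrate>: one block per visible (compactified) hash entry, one
// thread per voxel. Each voxel center is projected into the current frame; if the observed depth
// is valid and the signed distance (depth - voxel z) lies inside the truncation band, the voxel's
// SDF is updated as a weight-averaged running mean (integration) or the previous contribution is
// subtracted again (de-integration), with the color blended accordingly.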
template<bool deIntegrate = false>
__global__ void integrateDepthMapKernel(HashDataStruct hashData, CUDAFrame frame) {
const HashParams& hashParams = c_hashParams;
const HashEntry& entry = hashData.d_hashCompactified[blockIdx.x];
int3 pi_base = hashData.SDFBlockToVirtualVoxelPos(entry.pos);
uint i = threadIdx.x; //inside of an SDF block
int3 pi = pi_base + make_int3(hashData.delinearizeVoxelIndex(i));
float3 pf = hashData.virtualVoxelPosToWorld(pi);
pf = hashParams.m_rigidTransformInverse * pf;
float3 pixel = frame.project(pf);
uint2 screenPos = make_uint2((uint)pixel.x, (uint)pixel.y);
if (screenPos.x < frame.imageWidth && screenPos.y < frame.imageHeight) { //on screen
//float depth = g_InputDepth[screenPos];
float depth = tex2D(depthTextureRef, screenPos.x, screenPos.y);
uchar4 color_uc = tex2D(colorTextureRef, screenPos.x, screenPos.y);
float3 color = make_float3(color_uc.x, color_uc.y, color_uc.z);
if (color.x != MINF && depth != MINF) { // valid depth and color
//if (depth != MINF) { //valid depth
if (depth < hashParams.m_maxIntegrationDistance) {
float depthZeroOne = frame.cameraToProjZ(depth);
float sdf = depth - pf.z;
float truncation = hashData.getTruncation(depth);
//if (sdf > -truncation)
if (abs(sdf) < truncation)
{
if (sdf >= 0.0f) {
sdf = fminf(truncation, sdf);
} else {
sdf = fmaxf(-truncation, sdf);
}
float weightUpdate = max(hashParams.m_integrationWeightSample * 1.5f * (1.0f-depthZeroOne), 1.0f);
weightUpdate = 1.0f; //TODO remove that again
Voxel curr; //construct current voxel
curr.sdf = sdf;
curr.weight = weightUpdate;
curr.color = make_uchar4(color.x, color.y, color.z, 255);
uint idx = entry.ptr + i;
const Voxel& oldVoxel = hashData.d_SDFBlocks[idx];
Voxel newVoxel;
float3 oldColor = make_float3(oldVoxel.color.x, oldVoxel.color.y, oldVoxel.color.z);
float3 currColor = make_float3(curr.color.x, curr.color.y, curr.color.z);
if (!deIntegrate) { //integration
//hashData.combineVoxel(hashData.d_SDFBlocks[idx], curr, newVoxel);
float3 res;
if (oldVoxel.weight == 0) res = currColor;
//else res = (currColor + oldColor) / 2;
else res = 0.2f * currColor + 0.8f * oldColor;
//float3 res = (currColor*curr.weight + oldColor*oldVoxel.weight) / (curr.weight + oldVoxel.weight);
res = make_float3(round(res.x), round(res.y), round(res.z));
res = fmaxf(make_float3(0.0f), fminf(res, make_float3(254.5f)));
//newVoxel.color.x = (uchar)(res.x + 0.5f); newVoxel.color.y = (uchar)(res.y + 0.5f); newVoxel.color.z = (uchar)(res.z + 0.5f);
newVoxel.color = make_uchar4(res.x, res.y, res.z, 255);
newVoxel.sdf = (curr.sdf*curr.weight + oldVoxel.sdf*oldVoxel.weight) / (curr.weight + oldVoxel.weight);
newVoxel.weight = min((float)c_hashParams.m_integrationWeightMax, curr.weight + oldVoxel.weight);
}
else { //deintegration
//float3 res = 2 * c0 - c1;
float3 res = (oldColor*oldVoxel.weight - currColor*curr.weight) / (oldVoxel.weight - curr.weight);
res = make_float3(round(res.x), round(res.y), round(res.z));
res = fmaxf(make_float3(0.0f), fminf(res, make_float3(254.5f)));
//newVoxel.color.x = (uchar)(res.x + 0.5f); newVoxel.color.y = (uchar)(res.y + 0.5f); newVoxel.color.z = (uchar)(res.z + 0.5f);
newVoxel.color = make_uchar4(res.x, res.y, res.z, 255);
newVoxel.sdf = (oldVoxel.sdf*oldVoxel.weight - curr.sdf*curr.weight) / (oldVoxel.weight - curr.weight);
newVoxel.weight = max(0.0f, oldVoxel.weight - curr.weight);
if (newVoxel.weight <= 0.001f) {
newVoxel.sdf = 0.0f;
newVoxel.color = make_uchar4(0,0,0,0);
newVoxel.weight = 0.0f;
}
}
hashData.d_SDFBlocks[idx] = newVoxel;
}
}
}
}
}
void integrateDepthMapCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
integrateDepthMapKernel<false> <<<gridSize, blockSize>>>(hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
void deIntegrateDepthMapCUDA(HashDataStruct& hashData, const HashParams& hashParams, const CUDAFrame& frame)
{
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
integrateDepthMapKernel<true> <<<gridSize, blockSize >>>(hashData, frame);
CUDA_CHECKED_NO_ERROR();
}
__global__ void starveVoxelsKernel(HashDataStruct hashData) {
const uint idx = blockIdx.x;
const HashEntry& entry = hashData.d_hashCompactified[idx];
//is typically executed only every n'th frame
int weight = hashData.d_SDFBlocks[entry.ptr + threadIdx.x].weight;
weight = max(0, weight-1);
hashData.d_SDFBlocks[entry.ptr + threadIdx.x].weight = weight;
}
void starveVoxelsKernelCUDA(HashDataStruct& hashData, const HashParams& hashParams)
{
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
starveVoxelsKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
//__shared__ float shared_MinSDF[SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE / 2];
__shared__ uint shared_MaxWeight[SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE / 2];
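// garbageCollectIdentifyKernel: one block per compactified hash entry; each thread loads two
// voxels of the block and the per-block maximum weight is computed with a shared-memory tree
// reduction. Blocks whose maximum weight is 0 (no remaining observations) are flagged in
// d_hashDecision for deletion by garbageCollectFreeKernel.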
__global__ void garbageCollectIdentifyKernel(HashDataStruct hashData) {
const unsigned int hashIdx = blockIdx.x;
const HashEntry& entry = hashData.d_hashCompactified[hashIdx];
//uint h = hashData.computeHashPos(entry.pos);
//hashData.d_hashDecision[hashIdx] = 1;
//if (hashData.d_hashBucketMutex[h] == LOCK_ENTRY) return;
//if (entry.ptr == FREE_ENTRY) return; //should never happen since we did compactify before
//const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
const unsigned int idx0 = entry.ptr + 2*threadIdx.x+0;
const unsigned int idx1 = entry.ptr + 2*threadIdx.x+1;
Voxel v0 = hashData.d_SDFBlocks[idx0];
Voxel v1 = hashData.d_SDFBlocks[idx1];
//if (v0.weight == 0) v0.sdf = PINF;
//if (v1.weight == 0) v1.sdf = PINF;
//shared_MinSDF[threadIdx.x] = min(fabsf(v0.sdf), fabsf(v1.sdf)); //init shared memory
shared_MaxWeight[threadIdx.x] = max(v0.weight, v1.weight);
#pragma unroll 1
for (uint stride = 2; stride <= blockDim.x; stride <<= 1) {
__syncthreads();
if ((threadIdx.x & (stride-1)) == (stride-1)) {
//shared_MinSDF[threadIdx.x] = min(shared_MinSDF[threadIdx.x-stride/2], shared_MinSDF[threadIdx.x]);
shared_MaxWeight[threadIdx.x] = max(shared_MaxWeight[threadIdx.x-stride/2], shared_MaxWeight[threadIdx.x]);
}
}
__syncthreads();
if (threadIdx.x == blockDim.x - 1) {
uint maxWeight = shared_MaxWeight[threadIdx.x];
if (maxWeight == 0) {
hashData.d_hashDecision[hashIdx] = 1;
} else {
hashData.d_hashDecision[hashIdx] = 0;
}
}
}
void garbageCollectIdentifyCUDA(HashDataStruct& hashData, const HashParams& hashParams) {
const unsigned int threadsPerBlock = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE / 2;
const dim3 gridSize(hashParams.m_numOccupiedBlocks, 1);
const dim3 blockSize(threadsPerBlock, 1);
garbageCollectIdentifyKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
__global__ void garbageCollectFreeKernel(HashDataStruct hashData) {
//const uint hashIdx = blockIdx.x;
const uint hashIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (hashIdx < c_hashParams.m_numOccupiedBlocks && hashData.d_hashDecision[hashIdx] != 0) { //decision to delete the hash entry
const HashEntry& entry = hashData.d_hashCompactified[hashIdx];
//if (entry.ptr == FREE_ENTRY) return; //should never happen since we did compactify before
if (hashData.deleteHashEntryElement(entry.pos)) { //delete hash entry from hash (and performs heap append)
const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
#pragma unroll 1
for (uint i = 0; i < linBlockSize; i++) { //clear sdf block: CHECK TODO another kernel?
hashData.deleteVoxel(entry.ptr + i);
}
}
}
}
void garbageCollectFreeCUDA(HashDataStruct& hashData, const HashParams& hashParams) {
const unsigned int threadsPerBlock = T_PER_BLOCK*T_PER_BLOCK;
const dim3 gridSize((hashParams.m_numOccupiedBlocks + threadsPerBlock - 1) / threadsPerBlock, 1);
const dim3 blockSize(threadsPerBlock, 1);
garbageCollectFreeKernel<<<gridSize, blockSize>>>(hashData);
CUDA_CHECKED_NO_ERROR();
}
/** raycast */
__device__ __host__
RayCastData::RayCastData() {
d_depth = NULL;
d_depth4 = NULL;
d_normals = NULL;
d_colors = NULL;
d_vertexBuffer = NULL;
d_rayIntervalSplatMinArray = NULL;
d_rayIntervalSplatMaxArray = NULL;
}
extern "C" void updateConstantRayCastParams(const RayCastParams& params) {
size_t size;
cutilSafeCall(cudaGetSymbolSize(&size, c_rayCastParams));
cutilSafeCall(cudaMemcpyToSymbol(c_rayCastParams, ¶ms, size, 0, cudaMemcpyHostToDevice));
#ifdef DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__host__
void RayCastData::updateParams(const RayCastParams ¶ms) {
updateConstantRayCastParams(params);
}
/////////////////
// Device part //
/////////////////
__device__
const RayCastParams &RayCastData::params() const {
return c_rayCastParams;
}
__device__
float RayCastData::frac(float val) const {
return (val - floorf(val));
}
__device__
float3 RayCastData::frac(const float3 &val) const {
return make_float3(frac(val.x), frac(val.y), frac(val.z));
}
__device__
bool RayCastData::trilinearInterpolationSimpleFastFast(const HashDataStruct &hash, const float3 &pos, float &dist,
uchar3 &color) const {
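//samples the eight voxels surrounding 'pos' and blends sdf and color with trilinear weights;
//returns false as soon as any of the eight neighbors has zero weight (unobserved)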
const float oSet = c_hashParams.m_virtualVoxelSize;
const float3 posDual = pos - make_float3(oSet / 2.0f, oSet / 2.0f, oSet / 2.0f);
float3 weight = frac(hash.worldToVirtualVoxelPosFloat(pos));
dist = 0.0f;
float3 colorFloat = make_float3(0.0f, 0.0f, 0.0f);
Voxel v = hash.getVoxel(posDual + make_float3(0.0f, 0.0f, 0.0f));
if (v.weight == 0) return false;
float3 vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * (1.0f - weight.y) * (1.0f - weight.z) * v.sdf;
colorFloat += (1.0f - weight.x) * (1.0f - weight.y) * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, 0.0f, 0.0f));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * (1.0f - weight.y) * (1.0f - weight.z) * v.sdf;
colorFloat += weight.x * (1.0f - weight.y) * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(0.0f, oSet, 0.0f));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * weight.y * (1.0f - weight.z) * v.sdf;
colorFloat += (1.0f - weight.x) * weight.y * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(0.0f, 0.0f, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * (1.0f - weight.y) * weight.z * v.sdf;
colorFloat += (1.0f - weight.x) * (1.0f - weight.y) * weight.z * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, oSet, 0.0f));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * weight.y * (1.0f - weight.z) * v.sdf;
colorFloat += weight.x * weight.y * (1.0f - weight.z) * vColor;
v = hash.getVoxel(posDual + make_float3(0.0f, oSet, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += (1.0f - weight.x) * weight.y * weight.z * v.sdf;
colorFloat += (1.0f - weight.x) * weight.y * weight.z * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, 0.0f, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * (1.0f - weight.y) * weight.z * v.sdf;
colorFloat += weight.x * (1.0f - weight.y) * weight.z * vColor;
v = hash.getVoxel(posDual + make_float3(oSet, oSet, oSet));
if (v.weight == 0) return false;
vColor = make_float3(v.color.x, v.color.y, v.color.z);
dist += weight.x * weight.y * weight.z * v.sdf;
colorFloat += weight.x * weight.y * weight.z * vColor;
color = make_uchar3(colorFloat.x, colorFloat.y, colorFloat.z);//v.color;
return true;
}
__device__
float RayCastData::findIntersectionLinear(float tNear, float tFar, float dNear, float dFar) const {
return tNear + (dNear / (dNear - dFar)) * (tFar - tNear);
}
// d0 near, d1 far
__device__
bool RayCastData::findIntersectionBisection(const HashDataStruct &hash, const float3 &worldCamPos, const float3 &worldDir,
float d0, float r0, float d1, float r1, float &alpha, uchar3 &color) const {
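//refines the surface crossing between two samples of opposite sdf sign: repeatedly places a new sample
//at the linear zero estimate and keeps the sub-interval that still brackets the sign change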
float a = r0;
float aDist = d0;
float b = r1;
float bDist = d1;
float c = 0.0f;
#pragma unroll 1
for (uint i = 0; i < nIterationsBisection; i++) {
c = findIntersectionLinear(a, b, aDist, bDist);
float cDist;
if (!trilinearInterpolationSimpleFastFast(hash, worldCamPos + c * worldDir, cDist, color)) return false;
if (aDist * cDist > 0.0) {
a = c;
aDist = cDist;
}
else {
b = c;
bDist = cDist;
}
}
alpha = c;
return true;
}
__device__
float3 RayCastData::gradientForPoint(const HashDataStruct &hash, const float3 &pos) const {
const float voxelSize = c_hashParams.m_virtualVoxelSize;
float3 offset = make_float3(voxelSize, voxelSize, voxelSize);
float distp00;
uchar3 colorp00;
trilinearInterpolationSimpleFastFast(hash, pos - make_float3(0.5f * offset.x, 0.0f, 0.0f), distp00, colorp00);
float dist0p0;
uchar3 color0p0;
trilinearInterpolationSimpleFastFast(hash, pos - make_float3(0.0f, 0.5f * offset.y, 0.0f), dist0p0, color0p0);
float dist00p;
uchar3 color00p;
trilinearInterpolationSimpleFastFast(hash, pos - make_float3(0.0f, 0.0f, 0.5f * offset.z), dist00p, color00p);
float dist100;
uchar3 color100;
trilinearInterpolationSimpleFastFast(hash, pos + make_float3(0.5f * offset.x, 0.0f, 0.0f), dist100, color100);
float dist010;
uchar3 color010;
trilinearInterpolationSimpleFastFast(hash, pos + make_float3(0.0f, 0.5f * offset.y, 0.0f), dist010, color010);
float dist001;
uchar3 color001;
trilinearInterpolationSimpleFastFast(hash, pos + make_float3(0.0f, 0.0f, 0.5f * offset.z), dist001, color001);
float3 grad = make_float3((distp00 - dist100) / offset.x, (dist0p0 - dist010) / offset.y,
(dist00p - dist001) / offset.z);
float l = length(grad);
if (l == 0.0f) {
return make_float3(0.0f, 0.0f, 0.0f);
}
return -grad / l;
}
__device__
void RayCastData::traverseCoarseGridSimpleSampleAll(const HashDataStruct& hash, const float3& worldCamPos, const float3& worldDir, const float3& camDir, const int3& dTid, float minInterval, float maxInterval) const
{
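//ray marching: step along the ray in m_rayIncrement increments inside [minInterval, maxInterval];
//when consecutive valid samples change sign from positive to negative, refine the crossing by bisection
//and write depth, camera-space position, color and (optionally) the gradient-based normal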
const RayCastParams& rayCastParams = c_rayCastParams;
// Last Sample
RayCastSample lastSample; lastSample.sdf = 0.0f; lastSample.alpha = 0.0f; lastSample.weight = 0; // lastSample.color = int3(0, 0, 0);
const float depthToRayLength = 1.0f/camDir.z; // scale factor to convert from depth to ray length
float rayCurrent = depthToRayLength * max(rayCastParams.m_minDepth, minInterval); // Convert depth to raylength
float rayEnd = depthToRayLength * min(rayCastParams.m_maxDepth, maxInterval); // Convert depth to raylength
//float rayCurrent = depthToRayLength * rayCastParams.m_minDepth; // Convert depth to raylength
//float rayEnd = depthToRayLength * rayCastParams.m_maxDepth; // Convert depth to raylength
#pragma unroll 1
while(rayCurrent < rayEnd)
{
float3 currentPosWorld = worldCamPos+rayCurrent*worldDir;
float dist; uchar3 color;
if(trilinearInterpolationSimpleFastFast(hash, currentPosWorld, dist, color))
{
if(lastSample.weight > 0 && lastSample.sdf > 0.0f && dist < 0.0f)// current sample is always valid here
//if(lastSample.weight > 0 && ((lastSample.sdf > 0.0f && dist < 0.0f) || (lastSample.sdf < 0.0f && dist > 0.0f))) //hack for top down video
{
float alpha; // = findIntersectionLinear(lastSample.alpha, rayCurrent, lastSample.sdf, dist);
uchar3 color2;
bool b = findIntersectionBisection(hash, worldCamPos, worldDir, lastSample.sdf, lastSample.alpha, dist, rayCurrent, alpha, color2);
float3 currentIso = worldCamPos+alpha*worldDir;
if(b && abs(lastSample.sdf - dist) < rayCastParams.m_thresSampleDist)
{
if(abs(dist) < rayCastParams.m_thresDist)
{
float depth = alpha / depthToRayLength; // Convert ray length to depth
d_depth[dTid.y*rayCastParams.m_width+dTid.x] = depth;
d_depth4[dTid.y*rayCastParams.m_width+dTid.x] = make_float4(depthToCamera(dTid.x, dTid.y, depth), 1.0f);
d_colors[dTid.y*rayCastParams.m_width+dTid.x] = make_float4(color2.x/255.f, color2.y/255.f, color2.z/255.f, 1.0f);
if(rayCastParams.m_useGradients)
{
float3 normal = make_float3(0,0,0)-gradientForPoint(hash, currentIso);
float4 n = rayCastParams.m_viewMatrix * make_float4(normal, 0.0f);
d_normals[dTid.y*rayCastParams.m_width+dTid.x] = make_float4(n.x, n.y, n.z, 1.0f);
}
return;
}
}
}
lastSample.sdf = dist;
lastSample.alpha = rayCurrent;
// lastSample.color = color;
lastSample.weight = 1;
rayCurrent += rayCastParams.m_rayIncrement;
} else {
lastSample.weight = 0;
rayCurrent += rayCastParams.m_rayIncrement;
}
}
}
__global__ void computeNormalsDevice(float4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
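//normal from central differences of the camera-space positions: cross product of the vertical (PC-MC)
//and horizontal (CP-CM) differences, normalized and negated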
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= width || y >= height) return;
d_output[y*width+x] = make_float4(MINF, MINF, MINF, MINF);
if(x > 0 && x < width-1 && y > 0 && y < height-1)
{
const float4 CC = d_input[(y+0)*width+(x+0)];
const float4 PC = d_input[(y+1)*width+(x+0)];
const float4 CP = d_input[(y+0)*width+(x+1)];
const float4 MC = d_input[(y-1)*width+(x+0)];
const float4 CM = d_input[(y+0)*width+(x-1)];
if(CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF)
{
const float3 n = cross(make_float3(PC)-make_float3(MC), make_float3(CP)-make_float3(CM));
const float l = length(n);
if(l > 0.0f)
{
d_output[y*width+x] = make_float4(n/-l, 1.0f);
}
}
}
}
extern "C" void computeNormals(float4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1)/T_PER_BLOCK, (height + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeNormalsDevice<<<gridSize, blockSize>>>(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
texture<float, cudaTextureType2D, cudaReadModeElementType> rayMinTextureRef;
texture<float, cudaTextureType2D, cudaReadModeElementType> rayMaxTextureRef;
__global__ void renderKernel(HashDataStruct hashData, RayCastData rayCastData)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
const RayCastParams& rayCastParams = c_rayCastParams;
if (x < rayCastParams.m_width && y < rayCastParams.m_height) {
rayCastData.d_depth[y*rayCastParams.m_width+x] = MINF;
rayCastData.d_depth4[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
rayCastData.d_normals[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
rayCastData.d_colors[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
float3 camDir = normalize(RayCastData::depthToCamera(x, y, 1.0f));
float3 worldCamPos = rayCastParams.m_viewMatrixInverse * make_float3(0.0f, 0.0f, 0.0f);
float4 w = rayCastParams.m_viewMatrixInverse * make_float4(camDir, 0.0f);
float3 worldDir = normalize(make_float3(w.x, w.y, w.z));
float minInterval = tex2D(rayMinTextureRef, x, y);
float maxInterval = tex2D(rayMaxTextureRef, x, y);
//float minInterval = rayCastParams.m_minDepth;
//float maxInterval = rayCastParams.m_maxDepth;
//if (minInterval == 0 || minInterval == MINF) minInterval = rayCastParams.m_minDepth;
//if (maxInterval == 0 || maxInterval == MINF) maxInterval = rayCastParams.m_maxDepth;
//TODO MATTHIAS: shouldn't this return in the case no interval is found?
if (minInterval == 0 || minInterval == MINF) return;
if (maxInterval == 0 || maxInterval == MINF) return;
minInterval = max(minInterval, rayCastParams.m_minDepth);
maxInterval = min(maxInterval, rayCastParams.m_maxDepth);
// debugging
//if (maxInterval < minInterval) {
// printf("ERROR (%d,%d): [ %f, %f ]\n", x, y, minInterval, maxInterval);
//}
rayCastData.traverseCoarseGridSimpleSampleAll(hashData, worldCamPos, worldDir, camDir, make_int3(x,y,1), minInterval, maxInterval);
}
}
extern "C" void renderCS(const HashDataStruct& hashData, const RayCastData &rayCastData, const RayCastParams &rayCastParams)
{
const dim3 gridSize((rayCastParams.m_width + T_PER_BLOCK - 1)/T_PER_BLOCK, (rayCastParams.m_height + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
cudaBindTexture2D(0, &rayMinTextureRef, rayCastData.d_rayIntervalSplatMinArray, &depthTextureRef.channelDesc, rayCastParams.m_width, rayCastParams.m_height, sizeof(float)*rayCastParams.m_width);
cudaBindTexture2D(0, &rayMaxTextureRef, rayCastData.d_rayIntervalSplatMaxArray, &depthTextureRef.channelDesc, rayCastParams.m_width, rayCastParams.m_height, sizeof(float)*rayCastParams.m_width);
renderKernel<<<gridSize, blockSize>>>(hashData, rayCastData);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// ray interval splatting
/////////////////////////////////////////////////////////////////////////
__global__ void resetRayIntervalSplatKernel(RayCastData data)
{
uint idx = blockIdx.x + blockIdx.y * NUM_GROUPS_X;
data.d_vertexBuffer[idx] = make_float4(MINF);
}
extern "C" void resetRayIntervalSplatCUDA(RayCastData& data, const RayCastParams& params)
{
const dim3 gridSize(NUM_GROUPS_X, (params.m_maxNumVertices + NUM_GROUPS_X - 1) / NUM_GROUPS_X, 1); // ! todo check if need third dimension?
const dim3 blockSize(1, 1, 1);
resetRayIntervalSplatKernel<<<gridSize, blockSize>>>(data);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void rayIntervalSplatKernel(HashDataStruct hashData, RayCastData rayCastData)
{
uint idx = blockIdx.x + blockIdx.y * NUM_GROUPS_X;
const HashEntry& entry = hashData.d_hashCompactified[idx];
const RayCastParams& rayCastParams = c_rayCastParams;
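//splat the depth bounds of each allocated SDF block: project its eight corners into the image, reduce them
//to the per-block min/max depth and write that depth over the covered pixel rectangle of the min or max interval buffer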
if (entry.ptr != FREE_ENTRY) {
float3 posWorld = hashData.virtualVoxelPosToWorld(hashData.SDFBlockToVirtualVoxelPos(entry.pos)) +
c_hashParams.m_virtualVoxelSize * 0.5f * (SDF_BLOCK_SIZE - 1.0f);
if (rayCastData.isInCameraFrustumApprox(rayCastParams.m_viewMatrixInverse, posWorld)) return;
const RayCastParams ¶ms = c_rayCastParams;
const float4x4& viewMatrix = params.m_viewMatrix;
float3 worldCurrentVoxel = hashData.SDFBlockToWorld(entry.pos);
float3 MINV = worldCurrentVoxel - c_hashParams.m_virtualVoxelSize / 2.0f;
float3 maxv = MINV+SDF_BLOCK_SIZE*c_hashParams.m_virtualVoxelSize;
//float3 proj000 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, MINV.y, MINV.z));
//float3 proj100 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, MINV.y, MINV.z));
//float3 proj010 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, maxv.y, MINV.z));
//float3 proj001 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, MINV.y, maxv.z));
//float3 proj110 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, maxv.y, MINV.z));
//float3 proj011 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(MINV.x, maxv.y, maxv.z));
//float3 proj101 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, MINV.y, maxv.z));
//float3 proj111 = DepthCameraData::cameraToKinectProj(viewMatrix * make_float3(maxv.x, maxv.y, maxv.z));
float3 proj000 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, MINV.y, MINV.z));
float3 proj100 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, MINV.y, MINV.z));
float3 proj010 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, maxv.y, MINV.z));
float3 proj001 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, MINV.y, maxv.z));
float3 proj110 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, maxv.y, MINV.z));
float3 proj011 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(MINV.x, maxv.y, maxv.z));
float3 proj101 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, MINV.y, maxv.z));
float3 proj111 = RayCastData::cameraToDepthProj(viewMatrix * make_float3(maxv.x, maxv.y, maxv.z));
// Tree Reduction Min
float3 min00 = fminf(proj000, proj100);
float3 min01 = fminf(proj010, proj001);
float3 min10 = fminf(proj110, proj011);
float3 min11 = fminf(proj101, proj111);
float3 min0 = fminf(min00, min01);
float3 min1 = fminf(min10, min11);
float3 minFinal = fminf(min0, min1);
// Tree Reduction Max
float3 max00 = fmaxf(proj000, proj100);
float3 max01 = fmaxf(proj010, proj001);
float3 max10 = fmaxf(proj110, proj011);
float3 max11 = fmaxf(proj101, proj111);
float3 max0 = fmaxf(max00, max01);
float3 max1 = fmaxf(max10, max11);
float3 maxFinal = fmaxf(max0, max1);
float depth = maxFinal.z;
float * rayArray = rayCastData.d_rayIntervalSplatMaxArray;
if(params.m_splatMinimum == 1) {
depth = minFinal.z;
rayArray = rayCastData.d_rayIntervalSplatMinArray;
}
float depthWorld = RayCastData::depthProjToCameraZ(depth);
for(uint x=(uint)ceil(minFinal.x); x<maxFinal.x&&x<rayCastParams.m_width; x++) {
for(uint y=(uint)ceil(minFinal.y); y<maxFinal.y&&y<rayCastParams.m_height; y++) {
rayArray[y*rayCastParams.m_width+x] = depth;
}
}
}
}
extern "C" void rayIntervalSplatCUDA(const HashDataStruct& hashData, const RayCastData &rayCastData, const RayCastParams &rayCastParams)
{
const dim3 gridSize(NUM_GROUPS_X, (rayCastParams.m_numOccupiedSDFBlocks + NUM_GROUPS_X - 1) / NUM_GROUPS_X, 1);
const dim3 blockSize(1, 1, 1);
rayIntervalSplatKernel<<<gridSize, blockSize>>>(hashData, rayCastData);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
/** cube */
__device__ __host__
MarchingCubesData::MarchingCubesData() {
d_params = NULL;
d_triangles = NULL;
d_numTriangles = NULL;
m_bIsOnGPU = false;
}
/////////////////
// Device part //
/////////////////
__device__
void MarchingCubesData::extractIsoSurfaceAtPosition(const float3 &worldPos, const HashDataStruct &hashData,
const RayCastData &rayCastData) {
const HashParams &hashParams = c_hashParams;
const MarchingCubesParams ¶ms = *d_params;
if (params.m_boxEnabled == 1) {
if (!isInBoxAA(params.m_minCorner, params.m_maxCorner, worldPos)) return;
}
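//marching cubes cell at 'worldPos': sample the sdf at the eight cell corners, build the cube index from
//the corner signs, reject noisy/invalid cells, then emit triangles interpolated along the crossed edges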
const float isolevel = 0.0f;
const float P = hashParams.m_virtualVoxelSize / 2.0f;
const float M = -P;
float3 p000 = worldPos + make_float3(M, M, M);
float dist000;
uchar3 color000;
bool valid000 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p000, dist000, color000);
float3 p100 = worldPos + make_float3(P, M, M);
float dist100;
uchar3 color100;
bool valid100 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p100, dist100, color100);
float3 p010 = worldPos + make_float3(M, P, M);
float dist010;
uchar3 color010;
bool valid010 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p010, dist010, color010);
float3 p001 = worldPos + make_float3(M, M, P);
float dist001;
uchar3 color001;
bool valid001 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p001, dist001, color001);
float3 p110 = worldPos + make_float3(P, P, M);
float dist110;
uchar3 color110;
bool valid110 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p110, dist110, color110);
float3 p011 = worldPos + make_float3(M, P, P);
float dist011;
uchar3 color011;
bool valid011 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p011, dist011, color011);
float3 p101 = worldPos + make_float3(P, M, P);
float dist101;
uchar3 color101;
bool valid101 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p101, dist101, color101);
float3 p111 = worldPos + make_float3(P, P, P);
float dist111;
uchar3 color111;
bool valid111 = rayCastData.trilinearInterpolationSimpleFastFast(hashData, p111, dist111, color111);
if (!valid000 || !valid100 || !valid010 || !valid001 || !valid110 || !valid011 || !valid101 || !valid111) return;
uint cubeindex = 0;
if (dist010 < isolevel) cubeindex += 1;
if (dist110 < isolevel) cubeindex += 2;
if (dist100 < isolevel) cubeindex += 4;
if (dist000 < isolevel) cubeindex += 8;
if (dist011 < isolevel) cubeindex += 16;
if (dist111 < isolevel) cubeindex += 32;
if (dist101 < isolevel) cubeindex += 64;
if (dist001 < isolevel) cubeindex += 128;
const float thres = params.m_threshMarchingCubes;
float distArray[] = {dist000, dist100, dist010, dist001, dist110, dist011, dist101, dist111};
for (uint k = 0; k < 8; k++) {
for (uint l = 0; l < 8; l++) {
if (distArray[k] * distArray[l] < 0.0f) {
if (abs(distArray[k]) + abs(distArray[l]) > thres) return;
} else {
if (abs(distArray[k] - distArray[l]) > thres) return;
}
}
}
if (abs(dist000) > params.m_threshMarchingCubes2) return;
if (abs(dist100) > params.m_threshMarchingCubes2) return;
if (abs(dist010) > params.m_threshMarchingCubes2) return;
if (abs(dist001) > params.m_threshMarchingCubes2) return;
if (abs(dist110) > params.m_threshMarchingCubes2) return;
if (abs(dist011) > params.m_threshMarchingCubes2) return;
if (abs(dist101) > params.m_threshMarchingCubes2) return;
if (abs(dist111) > params.m_threshMarchingCubes2) return;
if (edgeTable[cubeindex] == 0 || edgeTable[cubeindex] == 255) return; // added by me edgeTable[cubeindex] == 255
Voxel v = hashData.getVoxel(worldPos);
Vertex vertlist[12];
if (edgeTable[cubeindex] & 1) vertlist[0] = vertexInterp(isolevel, p010, p110, dist010, dist110, v.color, v.color);
if (edgeTable[cubeindex] & 2) vertlist[1] = vertexInterp(isolevel, p110, p100, dist110, dist100, v.color, v.color);
if (edgeTable[cubeindex] & 4) vertlist[2] = vertexInterp(isolevel, p100, p000, dist100, dist000, v.color, v.color);
if (edgeTable[cubeindex] & 8) vertlist[3] = vertexInterp(isolevel, p000, p010, dist000, dist010, v.color, v.color);
if (edgeTable[cubeindex] & 16) vertlist[4] = vertexInterp(isolevel, p011, p111, dist011, dist111, v.color, v.color);
if (edgeTable[cubeindex] & 32) vertlist[5] = vertexInterp(isolevel, p111, p101, dist111, dist101, v.color, v.color);
if (edgeTable[cubeindex] & 64) vertlist[6] = vertexInterp(isolevel, p101, p001, dist101, dist001, v.color, v.color);
if (edgeTable[cubeindex] & 128)
vertlist[7] = vertexInterp(isolevel, p001, p011, dist001, dist011, v.color, v.color);
if (edgeTable[cubeindex] & 256)
vertlist[8] = vertexInterp(isolevel, p010, p011, dist010, dist011, v.color, v.color);
if (edgeTable[cubeindex] & 512)
vertlist[9] = vertexInterp(isolevel, p110, p111, dist110, dist111, v.color, v.color);
if (edgeTable[cubeindex] & 1024)
vertlist[10] = vertexInterp(isolevel, p100, p101, dist100, dist101, v.color, v.color);
if (edgeTable[cubeindex] & 2048)
vertlist[11] = vertexInterp(isolevel, p000, p001, dist000, dist001, v.color, v.color);
for (int i = 0; triTable[cubeindex][i] != -1; i += 3) {
Triangle t;
t.v0 = vertlist[triTable[cubeindex][i + 0]];
t.v1 = vertlist[triTable[cubeindex][i + 1]];
t.v2 = vertlist[triTable[cubeindex][i + 2]];
appendTriangle(t);
}
}
using Vertex = MarchingCubesData::Vertex;
using Triangle = MarchingCubesData::Triangle;
__device__
Vertex MarchingCubesData::vertexInterp(float isolevel, const float3 &p1, const float3 &p2, float d1, float d2,
const uchar4 &c1, const uchar4 &c2) const {
Vertex r1;
r1.p = p1;
r1.c = make_float3(c1.x, c1.y, c1.z) / 255.f;
Vertex r2;
r2.p = p2;
r2.c = make_float3(c2.x, c2.y, c2.z) / 255.f;
if (abs(isolevel - d1) < 0.00001f) return r1;
if (abs(isolevel - d2) < 0.00001f) return r2;
if (abs(d1 - d2) < 0.00001f) return r1;
float mu = (isolevel - d1) / (d2 - d1);
Vertex res;
res.p.x = p1.x + mu * (p2.x - p1.x); // Positions
res.p.y = p1.y + mu * (p2.y - p1.y);
res.p.z = p1.z + mu * (p2.z - p1.z);
res.c.x = (float) (c1.x + mu * (float) (c2.x - c1.x)) / 255.f; // Color
res.c.y = (float) (c1.y + mu * (float) (c2.y - c1.y)) / 255.f;
res.c.z = (float) (c1.z + mu * (float) (c2.z - c1.z)) / 255.f;
return res;
}
__device__
bool MarchingCubesData::isInBoxAA(const float3 &minCorner, const float3 &maxCorner, const float3 &pos) const {
if (pos.x < minCorner.x || pos.x > maxCorner.x) return false;
if (pos.y < minCorner.y || pos.y > maxCorner.y) return false;
if (pos.z < minCorner.z || pos.z > maxCorner.z) return false;
return true;
}
__device__
uint MarchingCubesData::append() {
uint addr = atomicAdd(d_numTriangles, 1);
//TODO check
return addr;
}
__device__
void MarchingCubesData::appendTriangle(const Triangle &t) {
if (*d_numTriangles >= d_params->m_maxNumTriangles) {
*d_numTriangles = d_params->m_maxNumTriangles;
return; // todo
}
uint addr = append();
if (addr >= d_params->m_maxNumTriangles) {
printf("marching cubes exceeded max number of triangles (addr, #tri, max#tri): (%d, %d, %d)\n", addr,
*d_numTriangles, d_params->m_maxNumTriangles);
*d_numTriangles = d_params->m_maxNumTriangles;
return; // todo
}
Triangle &triangle = d_triangles[addr];
triangle.v0 = t.v0;
triangle.v1 = t.v1;
triangle.v2 = t.v2;
return;
}
/** marching cube cuda*/
__global__ void resetMarchingCubesKernel(MarchingCubesData data) {
*data.d_numTriangles = 0;
}
__global__ void extractIsoSurfaceKernel(HashDataStruct hashData, RayCastData rayCastData, MarchingCubesData data) {
uint idx = blockIdx.x;
const HashEntry &entry = hashData.d_hash[idx];
if (entry.ptr != FREE_ENTRY) {
int3 pi_base = hashData.SDFBlockToVirtualVoxelPos(entry.pos);
int3 pi = pi_base + make_int3(threadIdx);
float3 worldPos = hashData.virtualVoxelPosToWorld(pi);
data.extractIsoSurfaceAtPosition(worldPos, hashData, rayCastData);
}
}
extern "C" void resetMarchingCubesCUDA(MarchingCubesData &data) {
const dim3 blockSize(1, 1, 1);
const dim3 gridSize(1, 1, 1);
resetMarchingCubesKernel<<<gridSize, blockSize>>>(data);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
extern "C" void
extractIsoSurfaceCUDA(const HashDataStruct &hashData, const RayCastData &rayCastData, const MarchingCubesParams ¶ms,
MarchingCubesData &data) {
const dim3 gridSize(params.m_hashNumBuckets * params.m_hashBucketSize, 1, 1);
const dim3 blockSize(params.m_sdfBlockSize, params.m_sdfBlockSize, params.m_sdfBlockSize);
extractIsoSurfaceKernel<<<gridSize, blockSize>>>(hashData, rayCastData, data);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
void CUDAMarchingCubesHashSDF::create(const MarchingCubesParams ¶ms) {
m_params = params;
m_data.allocate(m_params);
resetMarchingCubesCUDA(m_data);
}
void CUDAMarchingCubesHashSDF::extractIsoSurface(const HashDataStruct &hashData, const HashParams &hashParams,
const RayCastData &rayCastData, const vec3f &minCorner,
const vec3f &maxCorner, bool boxEnabled) {
resetMarchingCubesCUDA(m_data);
m_params.m_maxCorner = maxCorner;
m_params.m_minCorner = minCorner;
m_params.m_boxEnabled = boxEnabled;
m_data.updateParams(m_params);
extractIsoSurfaceCUDA(hashData, rayCastData, m_params, m_data);
copyTrianglesToCPU();
}
Mesh * CUDAMarchingCubesHashSDF::getMeshData() {
return m_meshData;
}
|
e7725a66001dc373c6a4f276b2af51cb70cb55fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include "../common/book.h"
#include <stdio.h>
#define N 10
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
static void HandleError(hipError_t err, const char *file, int line)
{if (err != hipSuccess)
{printf("%s in %s at line %d\n", hipGetErrorString(err),file, line);
exit(EXIT_FAILURE);
}
}
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // handle the data at this index
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, N * sizeof(int) ) ;
hipMalloc( (void**)&dev_b, N * sizeof(int) ) ;
hipMalloc( (void**)&dev_c, N * sizeof(int) ) ;
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice ) ;
hipMemcpy( dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost ) ;
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
|
e7725a66001dc373c6a4f276b2af51cb70cb55fc.cu
|
// #include "../common/book.h"
#include <stdio.h>
#define N 10
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
static void HandleError(cudaError_t err, const char *file, int line)
{if (err != cudaSuccess)
{printf("%s in %s at line %d\n", cudaGetErrorString(err),file, line);
exit(EXIT_FAILURE);
}
}
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // handle the data at this index
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, N * sizeof(int) ) ;
cudaMalloc( (void**)&dev_b, N * sizeof(int) ) ;
cudaMalloc( (void**)&dev_c, N * sizeof(int) ) ;
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int),
cudaMemcpyHostToDevice ) ;
cudaMemcpy( dev_b, b, N * sizeof(int),
cudaMemcpyHostToDevice ) ;
add<<<N,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, N * sizeof(int),
cudaMemcpyDeviceToHost ) ;
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
7f5c35f9c7b5b15893e0902b0060944b6a103009.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
Research 4 Fun
metaCuda.cu
Purpose: Calculates the n-th Fibonacci number and the Factorial of a number
from CUDA + Template Meta-Programming
@author O. A. Riveros
@version 1.0 28 May 2014 Santiago Chile.
*/
#include <iostream>
#include <ctime>
using namespace std;
// Begin CUDA
///////////////
// Fibonacci //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFibonacci() {
return cuMetaFibonacci<N - 1>() + cuMetaFibonacci<N - 2>();
}
template<>
__device__ unsigned long cuMetaFibonacci<0>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<1>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<2>() {
return 1;
}
template<unsigned long N>
__global__ void cuFibonacci(unsigned long *out) {
*out = cuMetaFibonacci<N>();
}
///////////////
// Factorial //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFactorial() {
return N * cuMetaFactorial<N - 1>();
}
template<>
__device__ unsigned long cuMetaFactorial<1>() {
return 1;
}
template<unsigned long N>
__global__ void cuFactorial(unsigned long *out) {
*out = cuMetaFactorial<N>();
}
// End CUDA
int main() {
///////////////
// Fibonacci //
///////////////
size_t size = sizeof(unsigned long);
unsigned long h_out[] = { 0 };
unsigned long *d_out;
hipMalloc((void **) &d_out, size);
hipMemcpy(d_out, h_out, size, hipMemcpyHostToDevice);
clock_t startTime = clock();
hipLaunchKernelGGL(( cuFibonacci<20>) , dim3(1), dim3(1), 0, 0, d_out);
clock_t endTime = clock();
clock_t clockTicksTaken = endTime - startTime;
hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost);
cout << h_out[0] << endl;
hipFree(d_out);
double timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
///////////////
// Factorial //
///////////////
hipMalloc((void **) &d_out, size);
hipMemcpy(d_out, h_out, size, hipMemcpyHostToDevice);
startTime = clock();
hipLaunchKernelGGL(( cuFactorial<20>) , dim3(1), dim3(1), 0, 0, d_out);
endTime = clock();
clockTicksTaken = endTime - startTime;
hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost);
cout << h_out[0] << endl;
hipFree(d_out);
timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
}
// Original Output
// 11:56:05 Build Finished (took 16s.185ms)
// 6765
// 4.2e-05
// 2432902008176640000
// 9e-06
|
7f5c35f9c7b5b15893e0902b0060944b6a103009.cu
|
/**
Research 4 Fun
metaCuda.cu
Purpose: Calculates the n-th Fibonacci number and the Factorial of a number
from CUDA + Template Meta-Programming
@author O. A. Riveros
@version 1.0 28 May 2014 Santiago Chile.
*/
#include <iostream>
#include <ctime>
using namespace std;
// Begin CUDA
///////////////
// Fibonacci //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFibonacci() {
return cuMetaFibonacci<N - 1>() + cuMetaFibonacci<N - 2>();
}
template<>
__device__ unsigned long cuMetaFibonacci<0>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<1>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<2>() {
return 1;
}
template<unsigned long N>
__global__ void cuFibonacci(unsigned long *out) {
*out = cuMetaFibonacci<N>();
}
///////////////
// Factorial //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFactorial() {
return N * cuMetaFactorial<N - 1>();
}
template<>
__device__ unsigned long cuMetaFactorial<1>() {
return 1;
}
template<unsigned long N>
__global__ void cuFactorial(unsigned long *out) {
*out = cuMetaFactorial<N>();
}
// End CUDA
int main() {
///////////////
// Fibonacci //
///////////////
size_t size = sizeof(unsigned long);
unsigned long h_out[] = { 0 };
unsigned long *d_out;
cudaMalloc((void **) &d_out, size);
cudaMemcpy(d_out, h_out, size, cudaMemcpyHostToDevice);
clock_t startTime = clock();
cuFibonacci<20> <<<1, 1>>>(d_out);
clock_t endTime = clock();
clock_t clockTicksTaken = endTime - startTime;
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
cout << h_out[0] << endl;
cudaFree(d_out);
double timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
///////////////
// Factorial //
///////////////
cudaMalloc((void **) &d_out, size);
cudaMemcpy(d_out, h_out, size, cudaMemcpyHostToDevice);
startTime = clock();
cuFactorial<20> <<<1, 1>>>(d_out);
endTime = clock();
clockTicksTaken = endTime - startTime;
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
cout << h_out[0] << endl;
cudaFree(d_out);
timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
}
// Original Output
// 11:56:05 Build Finished (took 16s.185ms)
// 6765
// 4.2e-05
// 2432902008176640000
// 9e-06
|
4e7f1b2e21ebf5fd2dd4f5bfcebfb9fab1c41728.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_func.cuh"
__global__ void create_checkerboard_kernel(unsigned char *pImage, unsigned int width, unsigned int height, int square_size, uchar3 color1, uchar3 color2)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= height || x >= width)
return;
// fill the image, alternate the colors
int index = (y * width + x) * 3;
if (x % square_size < (square_size / 2))
{
if (y % square_size < (square_size / 2))
{
pImage[index] = color1.x;
pImage[index + 1] = color1.y;
pImage[index + 2] = color1.z;
}
else
{
pImage[index] = color2.x;
pImage[index + 1] = color2.y;
pImage[index + 2] = color2.z;
}
}
else
{
if (y % square_size < (square_size / 2))
{
pImage[index] = color2.x;
pImage[index + 1] = color2.y;
pImage[index + 2] = color2.z;
}
else
{
pImage[index] = color1.x;
pImage[index + 1] = color1.y;
pImage[index + 2] = color1.z;
}
}
}
// fill an image with a checkerboard (BGR)
void create_checkerboard(evo::Mat<unsigned char> image, int square_size)
{
dim3 block(32, 32);
dim3 grid((image.getWidth() + block.x - 1) / block.x, (image.getHeight() + block.y - 1) / block.y);
// define the two colors of the checkerboard
uchar3 color1 = make_uchar3(255, 255, 255);//white
uchar3 color2 = make_uchar3(0, 255, 0);//green
// call the kernel
create_checkerboard_kernel << <grid, block >> >((unsigned char*)image.data, image.getWidth(), image.getHeight(), square_size, color1, color2);
}
__global__ void replace_image_by_distance_kernel(const unsigned char *pImage, const float* pDepth, const unsigned char *pBackground, unsigned char *result, const float max_value, const unsigned int width, const unsigned int height, const unsigned int image_channels)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= height || x >= width)
return;
// get the depth of the current pixel
float z_distance = pDepth[y * width + x];
// replace part of the view
int index = (y * width + x) * 3;
if (isfinite(z_distance) && (z_distance > max_value))
{
result[index] = pBackground[index];
result[index + 1] = pBackground[index + 1];
result[index + 2] = pBackground[index + 2];
}
else
{
if (image_channels == 1)//gray image
{
int img_index = y * width + x;
result[index] = pImage[img_index];
result[index + 1] = pImage[img_index];
result[index + 2] = pImage[img_index];
}
else//color image
{
int img_index = (y * width + x) * image_channels;
result[index] = pImage[img_index];
result[index + 1] = pImage[img_index + 1];
result[index + 2] = pImage[img_index + 2];
}
}
}
// replace the current image with the background if the distance is above the threshold
void replace_image_by_distance(evo::Mat<unsigned char> image, evo::Mat<float> distance_z, evo::Mat<unsigned char> background, evo::Mat<unsigned char> result, float max_value)
{
dim3 block(32, 32);
dim3 grid((image.getWidth() + block.x - 1) / block.x, (image.getHeight() + block.y - 1) / block.y);
// call the kernel
replace_image_by_distance_kernel << <grid, block >> >((unsigned char*)image.data, (float *)distance_z.data, (unsigned char*)background.data, (unsigned char*)result.data, max_value, image.getWidth(), image.getHeight(), image.getChannels());
}
|
4e7f1b2e21ebf5fd2dd4f5bfcebfb9fab1c41728.cu
|
#include "cuda_func.cuh"
__global__ void create_checkerboard_kernel(unsigned char *pImage, unsigned int width, unsigned int height, int square_size, uchar3 color1, uchar3 color2)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= height || x >= width)
return;
// fill the image, alternate the colors
int index = (y * width + x) * 3;
if (x % square_size < (square_size / 2))
{
if (y % square_size < (square_size / 2))
{
pImage[index] = color1.x;
pImage[index + 1] = color1.y;
pImage[index + 2] = color1.z;
}
else
{
pImage[index] = color2.x;
pImage[index + 1] = color2.y;
pImage[index + 2] = color2.z;
}
}
else
{
if (y % square_size < (square_size / 2))
{
pImage[index] = color2.x;
pImage[index + 1] = color2.y;
pImage[index + 2] = color2.z;
}
else
{
pImage[index] = color1.x;
pImage[index + 1] = color1.y;
pImage[index + 2] = color1.z;
}
}
}
// fill an image with a checkerboard (BGR)
void create_checkerboard(evo::Mat<unsigned char> image, int square_size)
{
dim3 block(32, 32);
dim3 grid((image.getWidth() + block.x - 1) / block.x, (image.getHeight() + block.y - 1) / block.y);
// define the two colors of the checkerboard
uchar3 color1 = make_uchar3(255, 255, 255);//white
uchar3 color2 = make_uchar3(0, 255, 0);//green
// call the kernel
create_checkerboard_kernel << <grid, block >> >((unsigned char*)image.data, image.getWidth(), image.getHeight(), square_size, color1, color2);
}
__global__ void replace_image_by_distance_kernel(const unsigned char *pImage, const float* pDepth, const unsigned char *pBackground, unsigned char *result, const float max_value, const unsigned int width, const unsigned int height, const unsigned int image_channels)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= height || x >= width)
return;
// get the depth of the current pixel
float z_distance = pDepth[y * width + x];
// replace part of the view
int index = (y * width + x) * 3;
if (isfinite(z_distance) && (z_distance > max_value))
{
result[index] = pBackground[index];
result[index + 1] = pBackground[index + 1];
result[index + 2] = pBackground[index + 2];
}
else
{
if (image_channels == 1)//gray image
{
int img_index = y * width + x;
result[index] = pImage[img_index];
result[index + 1] = pImage[img_index];
result[index + 2] = pImage[img_index];
}
else//color image
{
int img_index = (y * width + x) * image_channels;
result[index] = pImage[img_index];
result[index + 1] = pImage[img_index + 1];
result[index + 2] = pImage[img_index + 2];
}
}
}
// replace the current image with the background if the distance is above the threshold
void replace_image_by_distance(evo::Mat<unsigned char> image, evo::Mat<float> distance_z, evo::Mat<unsigned char> background, evo::Mat<unsigned char> result, float max_value)
{
dim3 block(32, 32);
dim3 grid((image.getWidth() + block.x - 1) / block.x, (image.getHeight() + block.y - 1) / block.y);
// call the kernel
replace_image_by_distance_kernel << <grid, block >> >((unsigned char*)image.data, (float *)distance_z.data, (unsigned char*)background.data, (unsigned char*)result.data, max_value, image.getWidth(), image.getHeight(), image.getChannels());
}
|
eb2df98e496dd25862a2b7292fe440f88222f8b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define P2P_KERNEL(p2p_kernel,p2p_kernel_core) \
extern "C" __global__ \
void p2p_kernel(int *nvec,float *ivec,float *jvec,float *kvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int mblok=nvec[2];\
int ib,jb,jbase,jsize,jblok,nij;\
int tx7,jj7;\
int i,j,ij,jj;\
float dxij,dyij,dzij,rij,rsij=0;\
float eps=1e-6;\
float pi14=0.25/M_PI;\
float veci[6],veck[4];\
__shared__ float vecj[NBLOK0*7];\
rij=rsij;\
ib=bx*NBLOK0+tx;\
for(i=0;i<6;i++) veci[i]=ivec[6*ib+i];\
for(i=0;i<4;i++) veck[i]=0.0f;\
tx7=tx*7;\
nij=nvec[bx*mblok+10];\
for(ij=0;ij<nij;ij++){\
jbase=nvec[bx*mblok+2*ij+11];\
jsize=nvec[bx*mblok+2*ij+12];\
jblok=(jsize+NBLOK0-1)/NBLOK0;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK0+jbase+tx;\
for(i=0;i<7;i++) vecj[tx7+i]=jvec[7*jb+i];\
__syncthreads();\
for(jj=jj7=0;jj<NBLOK0;jj++){\
p2p_kernel_core;\
}\
__syncthreads();\
}\
jb=j*NBLOK0+jbase+tx;\
for(i=0;i<7;i++) vecj[tx7+i]=jvec[7*jb+i];\
jb=j*NBLOK1+jbase;\
__syncthreads();\
for(jj=jj7=0;jj<jsize-(j*NBLOK0);jj++){\
p2p_kernel_core;\
}\
__syncthreads();\
}\
for(i=0;i<4;i++) kvec[4*ib+i]=veck[i];\
}
#define P2M_KERNEL(p2m_kernel,p2m_kernel_core) \
extern "C" __global__ \
void p2m_kernel(int *nvec,float *ivec,float *jvec,float *kvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int i,j,k,m,n,ib,jb,jj,jj7,jbase,jsize,jblok,nm,nms;\
int mblok=nvec[2],mp=nvec[6],nb,nc[3],nd;\
float rb=scal[0],xmin=scal[1],ymin=scal[2],zmin=scal[3];\
float xjc,yjc,zjc,xjjc,yjjc,zjjc,rh,al,be,eps=1e-6;\
float xx,s2,p,pn,p1,p2,fact,ere,eim,rhm,rhn;\
__shared__ int mg[NBLOK1],ng[NBLOK1];\
__shared__ float veci[2*NBLOK1],vecj[7*NBLOK1],veck[MPMAX];\
__shared__ float bnm[MPMAX];\
ib=bx*NBLOK1+tx;\
for(i=0;i<NBLOK1;i++){\
ng[i]=0;\
mg[i]=0;\
}\
for(n=0;n<mp;n++){\
for(m=0;m<=n;m++){\
nms=n*(n+1)/2+m;\
ng[nms]=n;\
mg[nms]=m;\
}\
}\
jblok=(MPMAX+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+tx;\
veck[jb]=kvec[jb];\
veck[jb]=kvec[jb];\
__syncthreads();\
}\
if(j*NBLOK1+tx<MPMAX){\
jb=j*NBLOK1+tx;\
veck[jb]=kvec[jb];\
veck[jb]=kvec[jb];\
}\
__syncthreads();\
for(i=0;i<2;i++) veci[2*tx+i]=0;\
__syncthreads();\
nb=nvec[bx*mblok+10];\
jbase=nvec[bx*mblok+11];\
jsize=nvec[bx*mblok+12];\
for(i=0;i<3;i++) nc[i]=0;\
k=0;\
i=1;\
while(nb!=0){\
j=2-k;\
nc[j]=nc[j]+nb%2*i;\
nb=nb/2;\
j=k+1;\
k=j%3;\
if(k==0) i=i*2;\
}\
nd=nc[0];\
nc[0]=nc[1];\
nc[1]=nc[2];\
nc[2]=nd;\
xjc=xmin+(nc[0]+0.5)*rb;\
yjc=ymin+(nc[1]+0.5)*rb;\
zjc=zmin+(nc[2]+0.5)*rb;\
jblok=(jsize+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+jbase+tx;\
for(i=0;i<7;i++) vecj[7*tx+i]=jvec[7*jb+i];\
__syncthreads();\
for(jj=jj7=0;jj<NBLOK1;jj++){\
p2m_kernel_core;\
__syncthreads();\
}\
}\
jb=j*NBLOK1+jbase+tx;\
for(i=0;i<7;i++) vecj[7*tx+i]=jvec[7*jb+i];\
__syncthreads();\
for(jj=jj7=0;jj<jsize-(j*NBLOK1);jj++){\
p2m_kernel_core;\
__syncthreads();\
}\
for(i=0;i<2;i++) ivec[2*ib+i]=veci[2*tx+i];\
}
#define M2L_KERNEL(m2l_kernel,m2l_kernel_core) \
extern "C" __global__ \
void m2l_kernel(int *nvec,float *ivec,float *jvec,float *kvec,float *lvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int i,j,k,m,n,ib,jb,ij,nij,jbase,jblok,je,nms,nmk,nks,jks,jnk;\
int mblok=nvec[2],mp=nvec[6],nrbm=nvec[7];\
int nb=0,nc[3],nd=0,mpdnm=(4*mp*mp*mp-mp)/3;\
float rb=scal[0],eps=1e-6;\
float xijc=0,yijc=0,zijc=0,rh,cnmre,cnmim,dnmre,dnmim;\
float sr,ank,ajk,ajn,fnmm,fnpm;\
float vecd[2];\
__shared__ int mg[NBLOK1],ng[NBLOK1];\
__shared__ float veci[6*NBLOK1],vecj[2*NBLOK1];\
__shared__ float ynmre[MPMAX],ynmim[MPMAX];\
nc[0]=0;\
rh=xijc+yijc+zijc+eps+nb+nc[0]+nd;\
ib=bx*NBLOK1+tx;\
nij=nvec[bx*mblok+10];\
for(i=0;i<NBLOK1;i++){\
ng[i]=0;\
mg[i]=0;\
}\
for(n=0;n<mp;n++){\
for(m=0;m<=n;m++){\
nms=n*(n+1)/2+m;\
ng[nms]=n;\
mg[nms]=m;\
}\
}\
jblok=(MPMAX+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+tx;\
ynmre[jb]=kvec[2*jb+0];\
ynmim[jb]=kvec[2*jb+2];\
__syncthreads();\
}\
if(j*NBLOK1+tx<MPMAX){\
jb=j*NBLOK1+tx;\
ynmre[jb]=kvec[2*jb+0];\
ynmim[jb]=kvec[2*jb+1];\
}\
__syncthreads();\
for(i=0;i<6;i++) veci[6*tx+i]=0;\
__syncthreads();\
for(ij=0;ij<nij;ij++){\
jbase=nvec[bx*mblok+2*ij+11];\
je=nvec[bx*mblok+2*ij+12];\
jb=jbase+tx;\
for(i=0;i<2;i++) vecj[2*tx+i]=jvec[2*jb+i];\
__syncthreads();\
m2l_kernel_core;\
for(i=0;i<2;i++) veci[6*tx+3*i]+=vecd[i];\
__syncthreads();\
}\
for(i=0;i<2;i++) ivec[2*ib+i]=veci[6*tx+3*i];\
}
#define L2P_KERNEL(l2p_kernel,l2p_kernel_core) \
extern "C" __global__ \
void l2p_kernel(int *nvec,float *ivec,float *jvec,float *kvec,float *lvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int i,j,k,m,n,ib,jb,jbase,jblok,nm,nms;\
int mblok=nvec[2],mp=nvec[6],nb,nc[3],nd;\
float rb=scal[0],xmin=scal[1],ymin=scal[2],zmin=scal[3];\
float xic,yic,zic,xiic,yiic,ziic,r,th,ph;\
float xx,yy=0,s2,p,pn,p1,p2,fact,ere,eim,eps=1e-6;\
float rsre=0,rsim=0,rrre=0,rrim=0,rthre=0,rthim=0,rphre=0,rphim=0;\
float g=0,gr=0,gth=0,gph=0,gx=0,gy=0,gz=0;\
float bnm,bth=0;\
__shared__ float veci[6*NBLOK1],vecj[2*NBLOK1],veck[4*NBLOK1],vecl[MPMAX];\
r=yy+bth+rsre+rsim+rrre+rrim+rthre+rthim+rphre+rphim+g+gr+gth+gph+gx+gy+gz;\
ib=bx*NBLOK1+tx;\
jblok=(MPMAX+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+tx;\
vecl[jb]=lvec[jb];\
vecl[jb]=lvec[jb];\
__syncthreads();\
}\
if(j*NBLOK1+tx<MPMAX){\
jb=j*NBLOK1+tx;\
vecl[jb]=lvec[jb];\
vecl[jb]=lvec[jb];\
}\
__syncthreads();\
nb=nvec[bx*mblok+10];\
jbase=nvec[bx*mblok+11];\
for(i=0;i<3;i++) nc[i]=0;\
k=0;\
i=1;\
while(nb!=0){\
j=2-k;\
nc[j]=nc[j]+nb%2*i;\
nb=nb/2;\
j=k+1;\
k=j%3;\
if(k==0) i=i*2;\
}\
nd=nc[0];\
nc[0]=nc[1];\
nc[1]=nc[2];\
nc[2]=nd;\
xic=xmin+(nc[0]+0.5)*rb;\
yic=ymin+(nc[1]+0.5)*rb;\
zic=zmin+(nc[2]+0.5)*rb;\
jb=jbase+tx;\
for(i=0;i<6;i++) veci[6*tx+i]=ivec[6*ib+i];\
for(i=0;i<2;i++) vecj[2*tx+i]=jvec[2*jb+i];\
for(i=0;i<4;i++) veck[4*tx+i]=0.0f;\
__syncthreads();\
l2p_kernel_core;\
for(i=0;i<4;i++) kvec[4*ib+i]=veck[4*tx+i];\
}
P2P_KERNEL(G_p2p_kernel,G_P2P_KERNEL_CORE);
P2P_KERNEL(Gni_p2p_kernel,GNI_P2P_KERNEL_CORE);
P2P_KERNEL(Gnj_p2p_kernel,GNJ_P2P_KERNEL_CORE);
P2M_KERNEL(G_p2m_kernel,G_P2M_KERNEL_CORE);
P2M_KERNEL(Gn_p2m_kernel,GN_P2M_KERNEL_CORE);
M2L_KERNEL(m2m_kernel,M2M_KERNEL_CORE);
M2L_KERNEL(m2l_kernel,M2L_KERNEL_CORE);
M2L_KERNEL(l2l_kernel,L2L_KERNEL_CORE);
L2P_KERNEL(G_l2p_kernel,G_L2P_KERNEL_CORE);
L2P_KERNEL(Gn_l2p_kernel,GN_L2P_KERNEL_CORE);
|
eb2df98e496dd25862a2b7292fe440f88222f8b1.cu
|
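//these macros appear to generate treecode/FMM-style interaction kernels:
//P2P = direct particle-particle sums, P2M = particle-to-multipole expansion,
//M2L = multipole/local translations, L2P = local-to-particle evaluation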
#define P2P_KERNEL(p2p_kernel,p2p_kernel_core) \
extern "C" __global__ \
void p2p_kernel(int *nvec,float *ivec,float *jvec,float *kvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int mblok=nvec[2];\
int ib,jb,jbase,jsize,jblok,nij;\
int tx7,jj7;\
int i,j,ij,jj;\
float dxij,dyij,dzij,rij,rsij=0;\
float eps=1e-6;\
float pi14=0.25/M_PI;\
float veci[6],veck[4];\
__shared__ float vecj[NBLOK0*7];\
rij=rsij;\
ib=bx*NBLOK0+tx;\
for(i=0;i<6;i++) veci[i]=ivec[6*ib+i];\
for(i=0;i<4;i++) veck[i]=0.0f;\
tx7=tx*7;\
nij=nvec[bx*mblok+10];\
for(ij=0;ij<nij;ij++){\
jbase=nvec[bx*mblok+2*ij+11];\
jsize=nvec[bx*mblok+2*ij+12];\
jblok=(jsize+NBLOK0-1)/NBLOK0;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK0+jbase+tx;\
for(i=0;i<7;i++) vecj[tx7+i]=jvec[7*jb+i];\
__syncthreads();\
for(jj=jj7=0;jj<NBLOK0;jj++){\
p2p_kernel_core;\
}\
__syncthreads();\
}\
jb=j*NBLOK0+jbase+tx;\
for(i=0;i<7;i++) vecj[tx7+i]=jvec[7*jb+i];\
jb=j*NBLOK1+jbase;\
__syncthreads();\
for(jj=jj7=0;jj<jsize-(j*NBLOK0);jj++){\
p2p_kernel_core;\
}\
__syncthreads();\
}\
for(i=0;i<4;i++) kvec[4*ib+i]=veck[i];\
}
#define P2M_KERNEL(p2m_kernel,p2m_kernel_core) \
extern "C" __global__ \
void p2m_kernel(int *nvec,float *ivec,float *jvec,float *kvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int i,j,k,m,n,ib,jb,jj,jj7,jbase,jsize,jblok,nm,nms;\
int mblok=nvec[2],mp=nvec[6],nb,nc[3],nd;\
float rb=scal[0],xmin=scal[1],ymin=scal[2],zmin=scal[3];\
float xjc,yjc,zjc,xjjc,yjjc,zjjc,rh,al,be,eps=1e-6;\
float xx,s2,p,pn,p1,p2,fact,ere,eim,rhm,rhn;\
__shared__ int mg[NBLOK1],ng[NBLOK1];\
__shared__ float veci[2*NBLOK1],vecj[7*NBLOK1],veck[MPMAX];\
__shared__ float bnm[MPMAX];\
ib=bx*NBLOK1+tx;\
for(i=0;i<NBLOK1;i++){\
ng[i]=0;\
mg[i]=0;\
}\
for(n=0;n<mp;n++){\
for(m=0;m<=n;m++){\
nms=n*(n+1)/2+m;\
ng[nms]=n;\
mg[nms]=m;\
}\
}\
jblok=(MPMAX+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+tx;\
veck[jb]=kvec[jb];\
veck[jb]=kvec[jb];\
__syncthreads();\
}\
if(j*NBLOK1+tx<MPMAX){\
jb=j*NBLOK1+tx;\
veck[jb]=kvec[jb];\
veck[jb]=kvec[jb];\
}\
__syncthreads();\
for(i=0;i<2;i++) veci[2*tx+i]=0;\
__syncthreads();\
nb=nvec[bx*mblok+10];\
jbase=nvec[bx*mblok+11];\
jsize=nvec[bx*mblok+12];\
for(i=0;i<3;i++) nc[i]=0;\
k=0;\
i=1;\
while(nb!=0){\
j=2-k;\
nc[j]=nc[j]+nb%2*i;\
nb=nb/2;\
j=k+1;\
k=j%3;\
if(k==0) i=i*2;\
}\
nd=nc[0];\
nc[0]=nc[1];\
nc[1]=nc[2];\
nc[2]=nd;\
xjc=xmin+(nc[0]+0.5)*rb;\
yjc=ymin+(nc[1]+0.5)*rb;\
zjc=zmin+(nc[2]+0.5)*rb;\
jblok=(jsize+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+jbase+tx;\
for(i=0;i<7;i++) vecj[7*tx+i]=jvec[7*jb+i];\
__syncthreads();\
for(jj=jj7=0;jj<NBLOK1;jj++){\
p2m_kernel_core;\
__syncthreads();\
}\
}\
jb=j*NBLOK1+jbase+tx;\
for(i=0;i<7;i++) vecj[7*tx+i]=jvec[7*jb+i];\
__syncthreads();\
for(jj=jj7=0;jj<jsize-(j*NBLOK1);jj++){\
p2m_kernel_core;\
__syncthreads();\
}\
for(i=0;i<2;i++) ivec[2*ib+i]=veci[2*tx+i];\
}
#define M2L_KERNEL(m2l_kernel,m2l_kernel_core) \
extern "C" __global__ \
void m2l_kernel(int *nvec,float *ivec,float *jvec,float *kvec,float *lvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int i,j,k,m,n,ib,jb,ij,nij,jbase,jblok,je,nms,nmk,nks,jks,jnk;\
int mblok=nvec[2],mp=nvec[6],nrbm=nvec[7];\
int nb=0,nc[3],nd=0,mpdnm=(4*mp*mp*mp-mp)/3;\
float rb=scal[0],eps=1e-6;\
float xijc=0,yijc=0,zijc=0,rh,cnmre,cnmim,dnmre,dnmim;\
float sr,ank,ajk,ajn,fnmm,fnpm;\
float vecd[2];\
__shared__ int mg[NBLOK1],ng[NBLOK1];\
__shared__ float veci[6*NBLOK1],vecj[2*NBLOK1];\
__shared__ float ynmre[MPMAX],ynmim[MPMAX];\
nc[0]=0;\
rh=xijc+yijc+zijc+eps+nb+nc[0]+nd;\
ib=bx*NBLOK1+tx;\
nij=nvec[bx*mblok+10];\
for(i=0;i<NBLOK1;i++){\
ng[i]=0;\
mg[i]=0;\
}\
for(n=0;n<mp;n++){\
for(m=0;m<=n;m++){\
nms=n*(n+1)/2+m;\
ng[nms]=n;\
mg[nms]=m;\
}\
}\
jblok=(MPMAX+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+tx;\
ynmre[jb]=kvec[2*jb+0];\
ynmim[jb]=kvec[2*jb+2];\
__syncthreads();\
}\
if(j*NBLOK1+tx<MPMAX){\
jb=j*NBLOK1+tx;\
ynmre[jb]=kvec[2*jb+0];\
ynmim[jb]=kvec[2*jb+1];\
}\
__syncthreads();\
for(i=0;i<6;i++) veci[6*tx+i]=0;\
__syncthreads();\
for(ij=0;ij<nij;ij++){\
jbase=nvec[bx*mblok+2*ij+11];\
je=nvec[bx*mblok+2*ij+12];\
jb=jbase+tx;\
for(i=0;i<2;i++) vecj[2*tx+i]=jvec[2*jb+i];\
__syncthreads();\
m2l_kernel_core;\
for(i=0;i<2;i++) veci[6*tx+3*i]+=vecd[i];\
__syncthreads();\
}\
for(i=0;i<2;i++) ivec[2*ib+i]=veci[6*tx+3*i];\
}
#define L2P_KERNEL(l2p_kernel,l2p_kernel_core) \
extern "C" __global__ \
void l2p_kernel(int *nvec,float *ivec,float *jvec,float *kvec,float *lvec)\
{\
int bx=blockIdx.x;\
int tx=threadIdx.x;\
int i,j,k,m,n,ib,jb,jbase,jblok,nm,nms;\
int mblok=nvec[2],mp=nvec[6],nb,nc[3],nd;\
float rb=scal[0],xmin=scal[1],ymin=scal[2],zmin=scal[3];\
float xic,yic,zic,xiic,yiic,ziic,r,th,ph;\
float xx,yy=0,s2,p,pn,p1,p2,fact,ere,eim,eps=1e-6;\
float rsre=0,rsim=0,rrre=0,rrim=0,rthre=0,rthim=0,rphre=0,rphim=0;\
float g=0,gr=0,gth=0,gph=0,gx=0,gy=0,gz=0;\
float bnm,bth=0;\
__shared__ float veci[6*NBLOK1],vecj[2*NBLOK1],veck[4*NBLOK1],vecl[MPMAX];\
r=yy+bth+rsre+rsim+rrre+rrim+rthre+rthim+rphre+rphim+g+gr+gth+gph+gx+gy+gz;\
ib=bx*NBLOK1+tx;\
jblok=(MPMAX+NBLOK1-1)/NBLOK1;\
for(j=0;j<jblok-1;j++){\
jb=j*NBLOK1+tx;\
vecl[jb]=lvec[jb];\
vecl[jb]=lvec[jb];\
__syncthreads();\
}\
if(j*NBLOK1+tx<MPMAX){\
jb=j*NBLOK1+tx;\
vecl[jb]=lvec[jb];\
vecl[jb]=lvec[jb];\
}\
__syncthreads();\
nb=nvec[bx*mblok+10];\
jbase=nvec[bx*mblok+11];\
for(i=0;i<3;i++) nc[i]=0;\
k=0;\
i=1;\
while(nb!=0){\
j=2-k;\
nc[j]=nc[j]+nb%2*i;\
nb=nb/2;\
j=k+1;\
k=j%3;\
if(k==0) i=i*2;\
}\
nd=nc[0];\
nc[0]=nc[1];\
nc[1]=nc[2];\
nc[2]=nd;\
xic=xmin+(nc[0]+0.5)*rb;\
yic=ymin+(nc[1]+0.5)*rb;\
zic=zmin+(nc[2]+0.5)*rb;\
jb=jbase+tx;\
for(i=0;i<6;i++) veci[6*tx+i]=ivec[6*ib+i];\
for(i=0;i<2;i++) vecj[2*tx+i]=jvec[2*jb+i];\
for(i=0;i<4;i++) veck[4*tx+i]=0.0f;\
__syncthreads();\
l2p_kernel_core;\
for(i=0;i<4;i++) kvec[4*ib+i]=veck[4*tx+i];\
}
P2P_KERNEL(G_p2p_kernel,G_P2P_KERNEL_CORE);
P2P_KERNEL(Gni_p2p_kernel,GNI_P2P_KERNEL_CORE);
P2P_KERNEL(Gnj_p2p_kernel,GNJ_P2P_KERNEL_CORE);
P2M_KERNEL(G_p2m_kernel,G_P2M_KERNEL_CORE);
P2M_KERNEL(Gn_p2m_kernel,GN_P2M_KERNEL_CORE);
M2L_KERNEL(m2m_kernel,M2M_KERNEL_CORE);
M2L_KERNEL(m2l_kernel,M2L_KERNEL_CORE);
M2L_KERNEL(l2l_kernel,L2L_KERNEL_CORE);
L2P_KERNEL(G_l2p_kernel,G_L2P_KERNEL_CORE);
L2P_KERNEL(Gn_l2p_kernel,GN_L2P_KERNEL_CORE);
|
94e687e59b6e8f170bdacc32f47580bfb66c7410.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* castRaysIntoSurfels: associate locations in an image plane with surfels in a cloud
*
* Evan Herbst
* 7 / 9 / 13
*/
#include <stdint.h>
#include <GL/gl.h>
#include <cuda_gl_interop.h>
#include "cuda_util/cudaUtils.h"
static const uint32_t blockSize1D = 32; //max cuda threads per block dimension; use 32 for Fermi architecture, 16 for Tesla
__global__ void remapDepthsFromUnitRangeKernel(const uint32_t* ids, float* depths, const uint32_t width, const uint32_t height, const float znear, const float zfar)
{
const uint32_t i = blockIdx.y * blockSize1D + threadIdx.y, j = blockIdx.x * blockSize1D + threadIdx.x, l = i * width + j;
if(ids[l] > 0) depths[l] = (znear * zfar / (znear - zfar)) / (depths[l] - zfar / (zfar - znear));
}
/*
* edit depthsPBO to remap depths from [0, 1] to physical values
*/
void remapDepthsFromUnitRangeCUDA(const uint32_t width, const uint32_t height, const GLuint idsPBO, const GLuint depthsPBO, const float znear, const float zfar)
{
CUDA_CALL(hipGLRegisterBufferObject(idsPBO));
CUDA_CALL(hipGLRegisterBufferObject(depthsPBO));
uint32_t* idPtr;
float* depthPtr;
CUDA_CALL(hipGLMapBufferObject__((void**)&idPtr, idsPBO));
CUDA_CALL(hipGLMapBufferObject__((void**)&depthPtr, depthsPBO));
const dim3 blockSize(blockSize1D, blockSize1D, 1);
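// note: width / blockSize.x is integer division, so the ceil() has no effect; the grid fully covers the
// image only when width and height are multiples of blockSize1D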
const dim3 numBlocks((uint32_t)ceil(width / blockSize.x), (uint32_t)ceil(height / blockSize.y), 1);
hipLaunchKernelGGL(( remapDepthsFromUnitRangeKernel), dim3(numBlocks), dim3(blockSize), 0, 0, idPtr, depthPtr, width, height, znear, zfar);
CUDA_CALL(hipGLUnmapBufferObject(idsPBO));
CUDA_CALL(hipGLUnmapBufferObject(depthsPBO));
CUDA_CALL(hipGLUnregisterBufferObject(idsPBO));
CUDA_CALL(hipGLUnregisterBufferObject(depthsPBO));
}
|
94e687e59b6e8f170bdacc32f47580bfb66c7410.cu
|
/*
* castRaysIntoSurfels: associate locations in an image plane with surfels in a cloud
*
* Evan Herbst
* 7 / 9 / 13
*/
#include <stdint.h>
#include <GL/gl.h>
#include <cuda_gl_interop.h>
#include "cuda_util/cudaUtils.h"
static const uint32_t blockSize1D = 32; //max cuda threads per block dimension; use 32 for Fermi architecture, 16 for Tesla
__global__ void remapDepthsFromUnitRangeKernel(const uint32_t* ids, float* depths, const uint32_t width, const uint32_t height, const float znear, const float zfar)
{
const uint32_t i = blockIdx.y * blockSize1D + threadIdx.y, j = blockIdx.x * blockSize1D + threadIdx.x, l = i * width + j;
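// note: there is no bounds check on i/j, so the launch grid must tile the image exactly; the formula below
// remaps the stored [0,1] depth back to a physical depth using znear/zfar, only where a surfel id is present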
if(ids[l] > 0) depths[l] = (znear * zfar / (znear - zfar)) / (depths[l] - zfar / (zfar - znear));
}
/*
* edit depthsPBO to remap depths from [0, 1] to physical values
*/
void remapDepthsFromUnitRangeCUDA(const uint32_t width, const uint32_t height, const GLuint idsPBO, const GLuint depthsPBO, const float znear, const float zfar)
{
CUDA_CALL(cudaGLRegisterBufferObject(idsPBO));
CUDA_CALL(cudaGLRegisterBufferObject(depthsPBO));
uint32_t* idPtr;
float* depthPtr;
CUDA_CALL(cudaGLMapBufferObject((void**)&idPtr, idsPBO));
CUDA_CALL(cudaGLMapBufferObject((void**)&depthPtr, depthsPBO));
const dim3 blockSize(blockSize1D, blockSize1D, 1);
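// note: width / blockSize.x is integer division, so the ceil() has no effect; the grid fully covers the
// image only when width and height are multiples of blockSize1D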
const dim3 numBlocks((uint32_t)ceil(width / blockSize.x), (uint32_t)ceil(height / blockSize.y), 1);
remapDepthsFromUnitRangeKernel<<<numBlocks, blockSize>>>(idPtr, depthPtr, width, height, znear, zfar);
CUDA_CALL(cudaGLUnmapBufferObject(idsPBO));
CUDA_CALL(cudaGLUnmapBufferObject(depthsPBO));
CUDA_CALL(cudaGLUnregisterBufferObject(idsPBO));
CUDA_CALL(cudaGLUnregisterBufferObject(depthsPBO));
}
|
87bcb3bb37c9dc461ee283a02edb26c6419b0bf6.hip
|
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include "moderngpu.cuh" // Include all MGPU kernels.
#include <typeinfo>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
#include <iostream>
#include <string>
#include <map>
#include "conio.h"
#include <fstream>
#include "pms.cuh"
//#include "kernelPrintf.h"
//#include "kernelCountLabelInGraphDB.h"
//#include "kernelMarkInvalidVertex.h"
//#include "markInvalidVertex.h"
//#include "checkArray.h"
//#include "displayArray.h"
//#include "checkDataBetweenHostAndGPU.h"
//#include "access_d_LO_from_idx_of_d_O.h"
//#include "countNumberOfLabelVetex.h"
//#include "countNumberOfEdgeLabel.h"
//#include "extractUniqueEdge.h"
//#include "ExtensionStructure.h"
//#include "getAndStoreExtension.h"
//#include "validEdge.h"
//#include "scanV.h"
//#include "getLastElement.h"
//#include "getValidExtension.h"
//#include "getUniqueExtension.h"
//#include "calcLabelAndStoreUniqueExtension.h"
//#include "calcBoundary.h"
//#include "calcSupport.h"
//#include "getSatisfyEdge.h"
//#include "header.h"
//
//
//#include "helper_timer.h"
using namespace std;
using namespace mgpu;
//
//#define CHECK(call) \
//{ \
//const hipError_t error = call; \
//if (error != hipSuccess) \
//{ \
//printf("Error: %s:%d, ", __FILE__, __LINE__); \
//printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
//exit(1); \
//} \
//}
ContextPtr ctx;
int main(int argc, char** argv){
int status=0;
hipDeviceReset();
ctx = CreateCudaDevice(argc, argv, true);
cout << typeid(ctx).name() << endl;
//device_info();
//cdactx=*ctx;
StopWatchWin timer;
//exit(0);
//system("pause");
#pragma region "load database"
//Open file result.txt to write append
std::ofstream fout("result.txt", std::ios_base::app | std::ios_base::out);
timer.start();
PMS pms; //Create the PMS object.
pms.os=&fout;
FUNCHECK(status=pms.prepareDataBase()); //prepare the data
if(status!=0){
cout<<endl<<"prepareDataBase function failed"<<endl;
exit(1);
}
timer.stop();
//pms.printdb(); //display the data
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("Loading data...\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: 6595
hTime=timer.getTime();
timer.reset();
#pragma endregion "end load database"
FUNCHECK(pms.extractAllEdgeInDB()); //From the database already loaded onto the device, extract all edges of the database in parallel
//pms.displayArrExtension(pms.hExtension.at(0).dExtension,pms.hExtension.at(0).noElem); //These edges are treated as extensions of pattern P. This step simply builds the DFS code for the edges of the graph.
timer.start();
FUNCHECK(pms.getValidExtension_pure()); //Extract the valid extensions (li<lj: only handled for simple undirected graphs) ==> Notes: the undirected and directed multigraph cases still need to be handled
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("getValidExtension_pure\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: 8.730469 (ms)
hTime=timer.getTime();
timer.reset();
timer.start();
FUNCHECK(pms.extractUniEdge());
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("extractUniEdge\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: 1.730469 (ms)
hTime=timer.getTime();
timer.reset();
timer.start();
FUNCHECK(pms.computeSupport()); //Compute the support of every edge in UniEdge and discard the extensions that do not satisfy minsup
//At this point we have collected the one-edge extensions that satisfy minsup (hUniEdgeSatisfyMinSup)
//
//FUNCHECK(pms.Mining()); //check whether DFS_CODE is minimal; if it is, write the result to result.txt and build the Embedding Columns
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("computeSupport\n");
std::printf("Processing time: %f (ms)\n", timer.getTime()/1000);//Processing time: 15.730469 (s)
hTime=timer.getTime();
timer.reset();
timer.start();
//Iterate over the edges that satisfy minsup in order to build:
//DFSCODE, hEmbedding, hLevelPtrEmbedding, hLevelListVerRMP and hLevelRMP in preparation for mining.
//FUNCHECK(pms.initialize());
//Extract the extensions that satisfy the initial minDFS_CODE
FUNCHECK(pms.MiningDeeper(pms.hLevelEXT.at(0).vE.at(0), pms.hLevelUniEdgeSatisfyMinsup.at(0).vecUES.at(0)));
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("Mining()\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: (ms)
hTime=timer.getTime();
system("pause");
return 0;
}
//int main(int argc, char** argv)
//{
// ContextPtr context = CreateCudaDevice(argc, argv, true);
//
// int noElem = 5;
// int* ptr = (int*)malloc(sizeof(int)*noElem);
// for (int i = 0; i < noElem; i++)
// {
// ptr[i]=i;
// cout<<ptr[i]<<" ";
// }
// cout<<endl;
// int *p=nullptr;
// hipMalloc((void**)&p,sizeof(int)*noElem);
// hipMemcpy(p,ptr,noElem*sizeof(int),hipMemcpyHostToDevice);
// cout<<"Input data"<<endl;
// kernelPrintdArr<<<1,100>>>(p,noElem);
// hipDeviceSynchronize();
// cout<<endl;
// //// int result = Reduce(p, noElem, *context);
// //// printf("Reduction total: %d\n\n", result);
// int result=0;
// //ScanExc(p, noElem, &result, *context);
// ScanExc(p, noElem, *context);
//// PrintArray(*data, "%4d", 10);
// kernelPrintdArr<<<1,100>>>(p,noElem);
// hipDeviceSynchronize();
// //printf("Exclusive scan:\n");
// //printf("Scan total: %d\n", result);
//
// hipFree(p);
//
// //// Run an exclusive scan.
// //ScanExc(data->get(), N, &total, context);
// //printf("Exclusive scan:\n");
// //PrintArray(*data, "%4d", 10);
// //printf("Scan total: %d\n", total);
//
// _getch();
// return 0;
//}
|
87bcb3bb37c9dc461ee283a02edb26c6419b0bf6.cu
|
#pragma once
#include "moderngpu.cuh" // Include all MGPU kernels.
#include <typeinfo>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
#include <iostream>
#include <string>
#include <map>
#include "conio.h"
#include <fstream>
#include "pms.cuh"
//#include "kernelPrintf.h"
//#include "kernelCountLabelInGraphDB.h"
//#include "kernelMarkInvalidVertex.h"
//#include "markInvalidVertex.h"
//#include "checkArray.h"
//#include "displayArray.h"
//#include "checkDataBetweenHostAndGPU.h"
//#include "access_d_LO_from_idx_of_d_O.h"
//#include "countNumberOfLabelVetex.h"
//#include "countNumberOfEdgeLabel.h"
//#include "extractUniqueEdge.h"
//#include "ExtensionStructure.h"
//#include "getAndStoreExtension.h"
//#include "validEdge.h"
//#include "scanV.h"
//#include "getLastElement.h"
//#include "getValidExtension.h"
//#include "getUniqueExtension.h"
//#include "calcLabelAndStoreUniqueExtension.h"
//#include "calcBoundary.h"
//#include "calcSupport.h"
//#include "getSatisfyEdge.h"
//#include "header.h"
//
//
//#include "helper_timer.h"
using namespace std;
using namespace mgpu;
//
//#define CHECK(call) \
//{ \
//const cudaError_t error = call; \
//if (error != cudaSuccess) \
//{ \
//printf("Error: %s:%d, ", __FILE__, __LINE__); \
//printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
//exit(1); \
//} \
//}
ContextPtr ctx;
int main(int argc, char** argv){
int status=0;
cudaDeviceReset();
ctx = CreateCudaDevice(argc, argv, true);
cout << typeid(ctx).name() << endl;
//device_info();
//cdactx=*ctx;
StopWatchWin timer;
//exit(0);
//system("pause");
#pragma region "load database"
//Open file result.txt to write append
std::ofstream fout("result.txt", std::ios_base::app | std::ios_base::out);
timer.start();
PMS pms; //Create the PMS object.
pms.os=&fout;
FUNCHECK(status=pms.prepareDataBase()); //prepare the data
if(status!=0){
cout<<endl<<"prepareDataBase function failed"<<endl;
exit(1);
}
timer.stop();
//pms.printdb(); //display the data
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("Loading data...\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: 6595
hTime=timer.getTime();
timer.reset();
#pragma endregion "end load database"
FUNCHECK(pms.extractAllEdgeInDB()); //From the database already loaded onto the device, extract all edges of the database in parallel
//pms.displayArrExtension(pms.hExtension.at(0).dExtension,pms.hExtension.at(0).noElem); //These edges are treated as extensions of pattern P. This step simply builds the DFS code for the edges of the graph.
timer.start();
FUNCHECK(pms.getValidExtension_pure()); //Extract the valid extensions (li<lj: only handled for simple undirected graphs) ==> Notes: the undirected and directed multigraph cases still need to be handled
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("getValidExtension_pure\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: 8.730469 (ms)
hTime=timer.getTime();
timer.reset();
timer.start();
FUNCHECK(pms.extractUniEdge());
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("extractUniEdge\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: 1.730469 (ms)
hTime=timer.getTime();
timer.reset();
timer.start();
FUNCHECK(pms.computeSupport()); //Compute the support of every edge in UniEdge and discard the extensions that do not satisfy minsup
//At this point we have collected the one-edge extensions that satisfy minsup (hUniEdgeSatisfyMinSup)
//
//FUNCHECK(pms.Mining()); //check whether DFS_CODE is minimal; if it is, write the result to result.txt and build the Embedding Columns
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("computeSupport\n");
std::printf("Processing time: %f (ms)\n", timer.getTime()/1000);//Processing time: 15.730469 (s)
hTime=timer.getTime();
timer.reset();
timer.start();
//Iterate over the edges that satisfy minsup in order to build:
//DFSCODE, hEmbedding, hLevelPtrEmbedding, hLevelListVerRMP and hLevelRMP in preparation for mining.
//FUNCHECK(pms.initialize());
//Extract the extensions that satisfy the initial minDFS_CODE
FUNCHECK(pms.MiningDeeper(pms.hLevelEXT.at(0).vE.at(0), pms.hLevelUniEdgeSatisfyMinsup.at(0).vecUES.at(0)));
timer.stop();
std::printf("\n\n**===-------------------------------------------------===**\n");
std::printf("Mining()\n");
std::printf("Processing time: %f (ms)\n", timer.getTime());//Processing time: (ms)
hTime=timer.getTime();
system("pause");
return 0;
}
//int main(int argc, char** argv)
//{
// ContextPtr context = CreateCudaDevice(argc, argv, true);
//
// int noElem = 5;
// int* ptr = (int*)malloc(sizeof(int)*noElem);
// for (int i = 0; i < noElem; i++)
// {
// ptr[i]=i;
// cout<<ptr[i]<<" ";
// }
// cout<<endl;
// int *p=nullptr;
// cudaMalloc((void**)&p,sizeof(int)*noElem);
// cudaMemcpy(p,ptr,noElem*sizeof(int),cudaMemcpyHostToDevice);
// cout<<"Input data"<<endl;
// kernelPrintdArr<<<1,100>>>(p,noElem);
// cudaDeviceSynchronize();
// cout<<endl;
// //// int result = Reduce(p, noElem, *context);
// //// printf("Reduction total: %d\n\n", result);
// int result=0;
// //ScanExc(p, noElem, &result, *context);
// ScanExc(p, noElem, *context);
//// PrintArray(*data, "%4d", 10);
// kernelPrintdArr<<<1,100>>>(p,noElem);
// cudaDeviceSynchronize();
// //printf("Exclusive scan:\n");
// //printf("Scan total: %d\n", result);
//
// cudaFree(p);
//
// //// Run an exclusive scan.
// //ScanExc(data->get(), N, &total, context);
// //printf("Exclusive scan:\n");
// //PrintArray(*data, "%4d", 10);
// //printf("Scan total: %d\n", total);
//
// _getch();
// return 0;
//}
|
a33d9822179b0519ef17dacf22b9c70ad8cf126d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int *dev_idata;
hipMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_idata!!!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_idata from host!!!");
thrust::device_ptr<int> dev_thrust_idata(dev_idata);
// pass in cpu pointers here
thrust::device_vector<int> dev_vec_idata(dev_idata, dev_idata + n);
timer().startGpuTimer();
thrust::exclusive_scan(dev_vec_idata.begin(), dev_vec_idata.end(), dev_vec_idata.begin());
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dev_vec_idata.begin(), dev_vec_idata.end(), odata);
hipFree(dev_idata);
checkCUDAErrorWithLine("free dev_idata!!!");
}
}
}
|
a33d9822179b0519ef17dacf22b9c70ad8cf126d.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int *dev_idata;
cudaMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_idata!!!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_idata from host!!!");
thrust::device_ptr<int> dev_thrust_idata(dev_idata);
// pass in cpu pointers here
thrust::device_vector<int> dev_vec_idata(dev_idata, dev_idata + n);
timer().startGpuTimer();
thrust::exclusive_scan(dev_vec_idata.begin(), dev_vec_idata.end(), dev_vec_idata.begin());
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dev_vec_idata.begin(), dev_vec_idata.end(), odata);
cudaFree(dev_idata);
checkCUDAErrorWithLine("free dev_idata!!!");
}
}
}
|
2b4b61301153e4fdf0b9ebefea33fa8f3360b8ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef GPU_MEMORY_CU
#define GPU_MEMORY_CU
#include "GPU_Dll.h"
extern "C"
void CopyCPUToGPU( void* d_destData, void* h_srcData, int sizeInBytes )
{
CUDA_SAFE_CALL( hipMemcpy( d_destData, h_srcData, sizeInBytes, hipMemcpyHostToDevice ) );
}
extern "C"
void CopyGPUToCPU( void * h_destData, void* d_srcData, int sizeInBytes)
{
CUDA_SAFE_CALL( hipMemcpy( h_destData, d_srcData, sizeInBytes, hipMemcpyDeviceToHost ) );
}
extern "C"
void GPUAllocate( void** d_data, int sizeInBytes )
{
GPUMALLOC( d_data, sizeInBytes );
}
extern "C"
void CPUAllocateByCUDA( void** h_data, int sizeInBytes )
{
CPUMALLOC( h_data, sizeInBytes );
}
extern "C"
void GPUFree( void* d_data)
{
CUDA_SAFE_CALL( hipFree( d_data) );
}
extern "C"
void CPUFreeByCUDA( void* h_data)
{
CUDA_SAFE_CALL( hipHostFree( h_data) );
}
extern "C"
void resetGPU()
{
CUDA_SAFE_CALL( hipDeviceReset() );
}
#endif
|
2b4b61301153e4fdf0b9ebefea33fa8f3360b8ed.cu
|
#ifndef GPU_MEMORY_CU
#define GPU_MEMORY_CU
#include "GPU_Dll.h"
extern "C"
void CopyCPUToGPU( void* d_destData, void* h_srcData, int sizeInBytes )
{
CUDA_SAFE_CALL( cudaMemcpy( d_destData, h_srcData, sizeInBytes, cudaMemcpyHostToDevice ) );
}
extern "C"
void CopyGPUToCPU( void * h_destData, void* d_srcData, int sizeInBytes)
{
CUDA_SAFE_CALL( cudaMemcpy( h_destData, d_srcData, sizeInBytes, cudaMemcpyDeviceToHost ) );
}
extern "C"
void GPUAllocate( void** d_data, int sizeInBytes )
{
GPUMALLOC( d_data, sizeInBytes );
}
extern "C"
void CPUAllocateByCUDA( void** h_data, int sizeInBytes )
{
CPUMALLOC( h_data, sizeInBytes );
}
extern "C"
void GPUFree( void* d_data)
{
CUDA_SAFE_CALL( cudaFree( d_data) );
}
extern "C"
void CPUFreeByCUDA( void* h_data)
{
CUDA_SAFE_CALL( cudaFreeHost( h_data) );
}
extern "C"
void resetGPU()
{
CUDA_SAFE_CALL( cudaThreadExit() );
}
#endif
|
ce727429a77bf10f422bb1be3138a9083847b80d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "rgamma_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *state = NULL;
hipMalloc(&state, XSIZE*YSIZE);
int state_len = 1;
float *vals = NULL;
hipMalloc(&vals, XSIZE*YSIZE);
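// note: the two allocations above are sized in raw bytes (XSIZE*YSIZE) rather than
// XSIZE*YSIZE * sizeof(element), while the kernel is launched with n = XSIZE*YSIZE elements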
int n = XSIZE*YSIZE;
float a = 2;
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((rgamma_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, state, state_len, vals, n, a, scale);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((rgamma_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, state, state_len, vals, n, a, scale);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((rgamma_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, state, state_len, vals, n, a, scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ce727429a77bf10f422bb1be3138a9083847b80d.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rgamma_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *state = NULL;
cudaMalloc(&state, XSIZE*YSIZE);
int state_len = 1;
float *vals = NULL;
cudaMalloc(&vals, XSIZE*YSIZE);
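// note: the two allocations above are sized in raw bytes (XSIZE*YSIZE) rather than
// XSIZE*YSIZE * sizeof(element), while the kernel is launched with n = XSIZE*YSIZE elements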
int n = XSIZE*YSIZE;
float a = 2;
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
rgamma_kernel<<<gridBlock,threadBlock>>>(state,state_len,vals,n,a,scale);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
rgamma_kernel<<<gridBlock,threadBlock>>>(state,state_len,vals,n,a,scale);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
rgamma_kernel<<<gridBlock,threadBlock>>>(state,state_len,vals,n,a,scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a142649f33fe00c67088edf7bfb4195b4df71130.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define SIZE 300000
#define BLOCK_SIZE 512
__global__ void reduction(int *A, int *B){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int data[BLOCK_SIZE];
data[threadIdx.x] = A[tid]; // load data to shared memory
__syncthreads();
// iterate of log base 2 block dimension. using stride of 2
for(int i = blockDim.x/2; i>0; i>>=1){
if(threadIdx.x < i){
if(data[threadIdx.x] < data[threadIdx.x + i]){
data[threadIdx.x] = data[threadIdx.x + i];
}
}
__syncthreads();
}
__syncthreads();
// thread 0 should write the maximum value to main memory
if(threadIdx.x == 0 )
B[blockIdx.x] = data[0];
}
int main(){
int A[SIZE];
int * B;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int * d_A, * d_B;
srand(time(NULL));
size_t size = SIZE*sizeof(int);
int GRIDSIZE = SIZE / (BLOCK_SIZE<<1);
if (GRIDSIZE % (BLOCK_SIZE<<1))
GRIDSIZE++;
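// note: each block reduces only blockDim.x elements, so a grid of roughly SIZE / (BLOCK_SIZE<<1) blocks
// examines about half of A; the remaining elements never reach the kernel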
B = (int *) malloc(sizeof(int)*GRIDSIZE);
dim3 dimBlock(BLOCK_SIZE,1,1);
dim3 dimGrid(GRIDSIZE,1,1);
for(int i = 0; i < SIZE; i++){
A[i] = rand()%10000;
if(i<GRIDSIZE)
B[i] = 0;
}
hipEventRecord(start);
hipMalloc((void **)&d_A, size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMalloc((void **)&d_B, GRIDSIZE*sizeof(int));
hipLaunchKernelGGL(( reduction), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B);
hipEventRecord(stop);
hipMemcpy(B, d_B, GRIDSIZE*sizeof(int), hipMemcpyDeviceToHost);
for(int i = 1; i < GRIDSIZE; i++){
if(B[0] < B[i])
B[0] = B[i];
}
hipEventSynchronize(stop);
float elapsed = 0;
hipEventElapsedTime(&elapsed, start, stop);
printf("Using Grid Size [%d, %d] and Block Size [%d, %d]..\n", dimGrid.x, dimGrid.y,dimBlock.x, dimBlock.y);
printf("maximum : %d\n", B[0]);
printf("Execution time : %f ms\n", elapsed);
hipFree(d_A);
hipFree(d_B);
}
|
a142649f33fe00c67088edf7bfb4195b4df71130.cu
|
#include <stdio.h>
#include <stdlib.h>
#define SIZE 300000
#define BLOCK_SIZE 512
__global__ void reduction(int *A, int *B){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int data[BLOCK_SIZE];
data[threadIdx.x] = A[tid]; // load data to shared memory
__syncthreads();
// iterate of log base 2 block dimension. using stride of 2
for(int i = blockDim.x/2; i>0; i>>=1){
if(threadIdx.x < i){
if(data[threadIdx.x] < data[threadIdx.x + i]){
data[threadIdx.x] = data[threadIdx.x + i];
}
}
__syncthreads();
}
__syncthreads();
// thread 0 should write the maximum value to main memory
if(threadIdx.x == 0 )
B[blockIdx.x] = data[0];
}
int main(){
int A[SIZE];
int * B;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int * d_A, * d_B;
srand(time(NULL));
size_t size = SIZE*sizeof(int);
int GRIDSIZE = SIZE / (BLOCK_SIZE<<1);
if (GRIDSIZE % (BLOCK_SIZE<<1))
GRIDSIZE++;
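// note: each block reduces only blockDim.x elements, so a grid of roughly SIZE / (BLOCK_SIZE<<1) blocks
// examines about half of A; the remaining elements never reach the kernel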
B = (int *) malloc(sizeof(int)*GRIDSIZE);
dim3 dimBlock(BLOCK_SIZE,1,1);
dim3 dimGrid(GRIDSIZE,1,1);
for(int i = 0; i < SIZE; i++){
A[i] = rand()%10000;
if(i<GRIDSIZE)
B[i] = 0;
}
cudaEventRecord(start);
cudaMalloc((void **)&d_A, size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_B, GRIDSIZE*sizeof(int));
reduction<<<dimGrid,dimBlock>>>(d_A, d_B);
cudaEventRecord(stop);
cudaMemcpy(B, d_B, GRIDSIZE*sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 1; i < GRIDSIZE; i++){
if(B[0] < B[i])
B[0] = B[i];
}
cudaEventSynchronize(stop);
float elapsed = 0;
cudaEventElapsedTime(&elapsed, start, stop);
printf("Using Grid Size [%d, %d] and Block Size [%d, %d]..\n", dimGrid.x, dimGrid.y,dimBlock.x, dimBlock.y);
printf("maximum : %d\n", B[0]);
printf("Execution time : %f ms\n", elapsed);
cudaFree(d_A);
cudaFree(d_B);
}
|
fda9ef38597ef3a020962eccd02f664bf24fb995.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <iostream>
typedef unsigned int uint;
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
template <typename T>
struct Plus {
__host__ __device__
T operator()(const T x, const T y)
{
return x + y;
}
};
template <typename T>
struct Minus {
__host__ __device__
T operator()(const T x, const T y)
{
return x - y;
}
};
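// adjustment packs each element's segment id into the bits above the largest key value (Plus) so that a
// single global radix sort orders the data segment by segment, then strips the prefix again (Minus)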
template<typename Op>
__global__ void adjustment(uint* d_vec, uint* d_seg, uint num_of_elements, uint* d_max ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_elements) {
uint mostSignificantBit = (uint)log2((double)*d_max) + 1;
uint segIndex = d_seg[id] << mostSignificantBit;
Op op = Op();
d_vec[id] = op(d_vec[id], segIndex);
}
}
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("1: Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("1: Async kernel error: %s\n", hipGetErrorString(errAsync));
}
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
int main(void) {
uint num_of_segments;
uint num_of_elements;
uint i;
scanf("%d", &num_of_segments);
uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg_aux = (uint *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
uint *h_value = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++) {
scanf("%d", &h_vec[i]);
h_value[i] = i;
}
uint *h_seg = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_segments; i++) {
for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) {
h_seg[j] = i;
}
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
uint *d_value, *d_value_out, *d_vec, *d_vec_out, *d_max, *d_seg;
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
uint* max_val = (uint *) malloc(sizeof(uint));
cudaTest(hipMalloc((void **) &d_max, sizeof(uint)));
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
cudaTest(hipMalloc((void **) &d_seg, mem_size_vec));
cudaTest(hipMalloc((void **) &d_value, mem_size_vec));
cudaTest(hipMalloc((void **) &d_vec_out, mem_size_vec));
cudaTest(hipMalloc((void **) &d_value_out, mem_size_vec));
cudaTest(hipMemcpy(d_value, h_value, mem_size_vec, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_seg, h_seg, mem_size_vec, hipMemcpyHostToDevice));
void *d_temp = NULL;
size_t temp_bytes = 0;
int grid = ((num_of_elements-1)/BLOCK_SIZE) + 1;
float averageExecutions = 0;
for (uint i = 0; i < EXECUTIONS; i++) {
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
/*
* maximum element of the array.
*/
hipEventRecord(start);
hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements);
hipMalloc(&d_temp_storage, temp_storage_bytes); // Allocate temporary storage
hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); // Run max-reduction
/*
* add prefix to the elements
*/
hipLaunchKernelGGL(( adjustment<Plus<uint>>) , dim3(grid), dim3(BLOCK_SIZE), 0, 0, d_vec, d_seg, num_of_elements, d_max);
/*
* sort the vector
*/
hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
hipMalloc((void **) &d_temp, temp_bytes);
hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync));
hipLaunchKernelGGL(( adjustment<Minus<uint>>) , dim3(grid), dim3(BLOCK_SIZE), 0, 0, d_vec_out, d_seg, num_of_elements, d_max);
hipEventRecord(stop);
hipEventSynchronize(stop);
if (ELAPSED_TIME == 1) {
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
averageExecutions += milliseconds;
}
hipFree(d_temp_storage);
temp_storage_bytes = 0;
d_temp_storage = NULL;
hipFree(d_temp);
temp_bytes = 0;
d_temp = NULL;
hipDeviceSynchronize();
}
hipMemcpy(h_vec, d_vec_out, mem_size_vec, hipMemcpyDeviceToHost);
hipFree(d_max);
hipFree(d_seg);
hipFree(d_vec);
hipFree(d_vec_out);
hipFree(d_value);
hipFree(d_value_out);
if (ELAPSED_TIME != 1) {
print(h_vec, num_of_elements);
}
else {
std::cout << averageExecutions/EXECUTIONS << "\n";
}
free(h_seg_aux);
free(h_seg);
free(h_vec);
free(h_value);
return 0;
}
|
fda9ef38597ef3a020962eccd02f664bf24fb995.cu
|
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <cub/util_allocator.cuh>
#include <cub/device/device_radix_sort.cuh>
#include <cub/device/device_reduce.cuh>
#include <iostream>
typedef unsigned int uint;
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
template <typename T>
struct Plus {
__host__ __device__
T operator()(const T x, const T y)
{
return x + y;
}
};
template <typename T>
struct Minus {
__host__ __device__
T operator()(const T x, const T y)
{
return x - y;
}
};
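// adjustment packs each element's segment id into the bits above the largest key value (Plus) so that a
// single global radix sort orders the data segment by segment, then strips the prefix again (Minus)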
template<typename Op>
__global__ void adjustment(uint* d_vec, uint* d_seg, uint num_of_elements, uint* d_max ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_elements) {
uint mostSignificantBit = (uint)log2((double)*d_max) + 1;
uint segIndex = d_seg[id] << mostSignificantBit;
Op op = Op();
d_vec[id] = op(d_vec[id], segIndex);
}
}
void cudaTest(cudaError_t error) {
if (error != cudaSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("1: Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("1: Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
int main(void) {
uint num_of_segments;
uint num_of_elements;
uint i;
scanf("%d", &num_of_segments);
uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg_aux = (uint *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
uint *h_value = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++) {
scanf("%d", &h_vec[i]);
h_value[i] = i;
}
uint *h_seg = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_segments; i++) {
for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) {
h_seg[j] = i;
}
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint *d_value, *d_value_out, *d_vec, *d_vec_out, *d_max, *d_seg;
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
uint* max_val = (uint *) malloc(sizeof(uint));
cudaTest(cudaMalloc((void **) &d_max, sizeof(uint)));
cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_seg, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_value, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_vec_out, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_value_out, mem_size_vec));
cudaTest(cudaMemcpy(d_value, h_value, mem_size_vec, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_vec, cudaMemcpyHostToDevice));
void *d_temp = NULL;
size_t temp_bytes = 0;
int grid = ((num_of_elements-1)/BLOCK_SIZE) + 1;
float averageExecutions = 0;
for (uint i = 0; i < EXECUTIONS; i++) {
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
/*
* maximum element of the array.
*/
cudaEventRecord(start);
cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements);
cudaMalloc(&d_temp_storage, temp_storage_bytes); // Allocate temporary storage
cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); // Run max-reduction
/*
* add prefix to the elements
*/
adjustment<Plus<uint>> <<< grid, BLOCK_SIZE>>>(d_vec, d_seg, num_of_elements, d_max);
/*
* sort the vector
*/
cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
cudaMalloc((void **) &d_temp, temp_bytes);
cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync));
adjustment<Minus<uint>> <<< grid, BLOCK_SIZE>>>(d_vec_out, d_seg, num_of_elements, d_max);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
if (ELAPSED_TIME == 1) {
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
averageExecutions += milliseconds;
}
cudaFree(d_temp_storage);
temp_storage_bytes = 0;
d_temp_storage = NULL;
cudaFree(d_temp);
temp_bytes = 0;
d_temp = NULL;
cudaDeviceSynchronize();
}
cudaMemcpy(h_vec, d_vec_out, mem_size_vec, cudaMemcpyDeviceToHost);
cudaFree(d_max);
cudaFree(d_seg);
cudaFree(d_vec);
cudaFree(d_vec_out);
cudaFree(d_value);
cudaFree(d_value_out);
if (ELAPSED_TIME != 1) {
print(h_vec, num_of_elements);
}
else {
std::cout << averageExecutions/EXECUTIONS << "\n";
}
free(h_seg_aux);
free(h_seg);
free(h_vec);
free(h_value);
return 0;
}
|
1af7f1c702075c04ea66f94aa9ccc147bb63f651.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelProcessEventsBatchAsync(uint8_t* gpuEventsX,uint8_t* gpuEventsY,int gpuEventListSize, float* gpuFilter, int fsx, int fsy, int fsz, float* gpuBuffer, int ringBufferIdx, int bsx, int bsy, int bsz, int fs_xy, int fn)
{
// Calculate filter idx
int filterPos = threadIdx.x + blockIdx.x * blockDim.x;
// Per block shared memory
__shared__ uint8_t gpuEventListSharedX[MAX_SHARED_GPU_EVENTS];
__shared__ uint8_t gpuEventListSharedY[MAX_SHARED_GPU_EVENTS];
// How many runs do we need to process all events
int processingRuns = ceilf((float)gpuEventListSize/MAX_SHARED_GPU_EVENTS);
// Events for each thread to read
int eventReadsPerThread = ceilf((float)MAX_SHARED_GPU_EVENTS/blockDim.x);
// Offset in global event buffer
int globalEventIdx = threadIdx.x;
// Idx valid
if (filterPos < fn) {
// Read filter coefficient from global memory
float filterVal = gpuFilter[filterPos];
// Compute x,y,z coordinates in buffer
int fz = filterPos / fs_xy;
int fxy = filterPos % fs_xy;
int fy = fxy / fsx;
int fx = fxy % fsx;
// Convert buffer coordinates (mirror all axes -> convolution instead of correlation)
// Origin for mirroring is x = w/2, y = h/2, z = 0
int bz = ((ringBufferIdx + (fsz - 1) - fz ) % bsz);
int bx_tmp = fsx / 2 - fx;
int by_tmp = fsy / 2 - fy;
int bPos_tmp = bz*bsy*bsx;
int sharedEventCnt = MAX_SHARED_GPU_EVENTS;
// Iterate over event list in blocks, stored in shared memory
for(int runIdx = 0; runIdx<processingRuns; runIdx++) {
// Last run ? Compute size of shared event list
if(runIdx+1 == processingRuns) {
sharedEventCnt = gpuEventListSize % MAX_SHARED_GPU_EVENTS;
}
// Compute index in shared memory
int localEventIdx = threadIdx.x;
// Fill the shared memory either with MAX_SHARED_GPU_EVENTS
// or use each thread multiple times
for(int i = 0; i < eventReadsPerThread; i++) {
// Valid indices
if(localEventIdx >= sharedEventCnt)
break;
// Load event into shared memory by using one thread per event
gpuEventListSharedX[localEventIdx] = gpuEventsX[globalEventIdx];
gpuEventListSharedY[localEventIdx] = gpuEventsY[globalEventIdx];
// Goto next event for which this thread is responsible
localEventIdx += blockDim.x;
globalEventIdx += blockDim.x;
}
// Synchronize threads and wait until shared memory is filled
// TODO: Deadlock possible?
// At least one thread in each warp should hit that barrier to continue!
// Bad relationship between shared event list size and block size could cause problems ?!
__syncthreads();
// Iterate over every event block in shared memory
for(localEventIdx = 0; localEventIdx < sharedEventCnt; localEventIdx++) {
// Compute corresponding buffer coordinate
int bx = bx_tmp + gpuEventListSharedX[localEventIdx];
int by = by_tmp + gpuEventListSharedY[localEventIdx];
// Check for valid buffer position (flip buffer z)
if(bx >= 0 && bx < bsx && by >= 0 && by < bsy) {
int bufferPos = bPos_tmp + by*bsx + bx;
// Add each filter coefficient to the global buffer
atomicAdd(gpuBuffer + bufferPos,filterVal);
}
}
}
}
}
|
1af7f1c702075c04ea66f94aa9ccc147bb63f651.cu
|
#include "includes.h"
__global__ void kernelProcessEventsBatchAsync(uint8_t* gpuEventsX,uint8_t* gpuEventsY,int gpuEventListSize, float* gpuFilter, int fsx, int fsy, int fsz, float* gpuBuffer, int ringBufferIdx, int bsx, int bsy, int bsz, int fs_xy, int fn)
{
// Calculate filter idx
int filterPos = threadIdx.x + blockIdx.x * blockDim.x;
// Per block shared memory
__shared__ uint8_t gpuEventListSharedX[MAX_SHARED_GPU_EVENTS];
__shared__ uint8_t gpuEventListSharedY[MAX_SHARED_GPU_EVENTS];
// How many runs do we need to process all events
int processingRuns = ceilf((float)gpuEventListSize/MAX_SHARED_GPU_EVENTS);
// Events for each thread to read
int eventReadsPerThread = ceilf((float)MAX_SHARED_GPU_EVENTS/blockDim.x);
// Offset in global event buffer
int globalEventIdx = threadIdx.x;
// Idx valid
if (filterPos < fn) {
// Read filter coefficient from global memory
float filterVal = gpuFilter[filterPos];
// Compute x,y,z coordinates in buffer
int fz = filterPos / fs_xy;
int fxy = filterPos % fs_xy;
int fy = fxy / fsx;
int fx = fxy % fsx;
// Convert buffer coordinates (mirror all axes -> convolution instead of correlation)
// Origin for mirroring is x = w/2, y = h/2, z = 0
int bz = ((ringBufferIdx + (fsz - 1) - fz ) % bsz);
int bx_tmp = fsx / 2 - fx;
int by_tmp = fsy / 2 - fy;
int bPos_tmp = bz*bsy*bsx;
int sharedEventCnt = MAX_SHARED_GPU_EVENTS;
// Iterate over event list in blocks, stored in shared memory
for(int runIdx = 0; runIdx<processingRuns; runIdx++) {
// Last run ? Compute size of shared event list
if(runIdx+1 == processingRuns) {
sharedEventCnt = gpuEventListSize % MAX_SHARED_GPU_EVENTS;
}
// Compute index in shared memory
int localEventIdx = threadIdx.x;
// Fill the shared memory either with MAX_SHARED_GPU_EVENTS
// or use each thread multiple times
for(int i = 0; i < eventReadsPerThread; i++) {
// Valid indices
if(localEventIdx >= sharedEventCnt)
break;
// Load event into shared memory by using one thread per event
gpuEventListSharedX[localEventIdx] = gpuEventsX[globalEventIdx];
gpuEventListSharedY[localEventIdx] = gpuEventsY[globalEventIdx];
// Goto next event for which this thread is responsible
localEventIdx += blockDim.x;
globalEventIdx += blockDim.x;
}
// Synchronize threads and wait until shared memory is filled
// TODO: Deadlock possible?
// At least one thread in each warp should hit that barrier to continue!
// Bad relationship between shared event list size and block size could cause problems ?!
__syncthreads();
// Iterate over every event block in shared memory
for(localEventIdx = 0; localEventIdx < sharedEventCnt; localEventIdx++) {
// Compute corresponding buffer coordinate
int bx = bx_tmp + gpuEventListSharedX[localEventIdx];
int by = by_tmp + gpuEventListSharedY[localEventIdx];
// Check for valid buffer position (filp buffer z)
if(bx >= 0 && bx < bsx && by >= 0 && by < bsy) {
int bufferPos = bPos_tmp + by*bsx + bx;
// Add each filter coefficient to the global buffer
atomicAdd(gpuBuffer + bufferPos,filterVal);
}
}
}
}
}
|
5e2ada978eaa1917e75e5bd04d4b55e213250a5c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <chrono>
#include <dirent.h>
using namespace std::chrono;
using namespace std;
vector<int> G_timestamps;
int getCurrentTime () {
return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}
void F_TIME_START () {
G_timestamps.push_back(getCurrentTime());
}
void F_TIME_END (string measuredName) {
int start = G_timestamps.back();
int end = getCurrentTime();
float diff = (end - start) / 1000.0;
G_timestamps.pop_back();
cout << endl << "## [" << measuredName << "]: " << diff << "s" << endl << endl;
}
void coutGPUStatus () {
size_t freem, totalm;
float free_m, total_m, used_m;
hipMemGetInfo((size_t*)&freem, (size_t*)&totalm);
free_m = (size_t) freem / 1048576.0;
total_m = (size_t) totalm / 1048576.0;
used_m = total_m - free_m;
printf ( "## Total: %f MB. Used %f MB. Free: %f MB. \n", total_m, used_m, free_m);
}
void coutResult(int& generation, int& max_fitness_value) {
cout << "Generation " << generation << ", currently best individual can activate " << max_fitness_value << " others" << endl;
}
void coutPopulation (vector <vector<int>>& population) {
cout << "Population:";
for (int i=0; i<population.size(); i++) {
cout << "\nIndiv: " << i << ": ";
for (int j=0; j<population[i].size(); j++) {
if (population[i][j] < 10) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 100) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 1000) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 10000) {
cout << population[i][j] << ", ";
}
else {
cout << population[i][j] << ",";
}
}
}
cout << "\n\n";
}
void coutIndividual (vector <vector<int>>& population, int i) {
cout << "Individual " << i << ":";
for (int j=0; j<population[i].size(); j++) {
if (population[i][j] < 10) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 100) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 1000) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 10000) {
cout << population[i][j] << ", ";
}
else {
cout << population[i][j] << ",";
}
}
cout << "\n\n";
}
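// linear search of row x in the CSR-style arrays (inf_row_ptr / inf_col_ind / inf_values) for column y;
// returns 0 when node x has no recorded influence on node y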
float getInfluenceValue (int N, int inf_values_size, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int x, int y) {
float infValue = 0;
int min = inf_row_ptr[x];
int max = x == N-1 ? inf_values_size-1 : inf_row_ptr[x+1]; //inf_values_size-1
for (int i=min; i<max; i++) {
if (inf_col_ind[i] == y) {
infValue = inf_values[i];
break;
}
}
return infValue;
}
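// one spreading step: for every individual whose active set changed in the previous step, activate each
// node whose accumulated influence from the individual's currently active nodes reaches INFLUENCE_THRESHOLD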
void InfluenceSpreadPopulationStep (bool *dyn_activeNodesPerIndividual, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int N, int nrOfChangedIndividuals, int inf_values_size, float INFLUENCE_THRESHOLD, vector<int>& changedIndividuals) {
for (int indiv_id = 0; indiv_id < nrOfChangedIndividuals; indiv_id++) {
for (int node_id = 0; node_id < N; node_id++) {
int indiv_index = changedIndividuals[indiv_id];
float infValue = 0; // total value of influence on the node
for (int i=0; i<N; i++) {
if (dyn_activeNodesPerIndividual[indiv_index * N + i] && node_id != i) { // if i-th element is active and is not the node
float result = getInfluenceValue(N, inf_values_size, inf_values, inf_col_ind, inf_row_ptr, i, node_id);
infValue += result; // add i-th element influence on the node
//printf("Influence %d on %d is: %f\n", i, node_id, result);
//printf("\ninfValue: %f, id: %d", infValue, id);
}
}
//printf("\ninfValue: %f, id: %d", infValue, id);
if (infValue >= INFLUENCE_THRESHOLD) { // if total influence on the node is greater than or equal to the INFLUENCE_THRESHOLD value
dyn_activeNodesPerIndividual[indiv_index * N + node_id] = true; // activate the node
}
}
}
}
vector <vector<float>> readData (string dataset_name, int N, string _EXPERIMENT_ID) {
vector <vector<float>> influence;
// initialization of the influence vector
for (int i=0; i<N; i++) {
cout << endl << i << " out of " << N << endl;
vector<float> row(N, 0);
influence.push_back(row);
if ((i + 1) * N % (N * N / 10) == 0) {
cout << "[Initialization of the influence matrix]: " << float((i + 1) * N) / (N * N) * 100 << "%" << endl;
}
}
// total number of interactions received by every node
vector<float> received(N, 0);
ifstream infile("./experiments_" + _EXPERIMENT_ID + "/" + dataset_name);
string line;
int _csv_id_hack = -1;
if (dataset_name.find(".csv") != std::string::npos) {
_csv_id_hack = 0;
}
if (infile.good()) {
int line_nr = 0;
while (getline(infile, line)) {
cout << "Reading raw data file, line nr: " << line_nr << endl;
//cout << line << endl;
istringstream iss(line);
int a, b;
if (!(iss >> a >> b)) { cout << "ERROR" << endl; break; } // error
if (a != b && a + _csv_id_hack < N && b + _csv_id_hack < N) {
influence[a + _csv_id_hack][b + _csv_id_hack] += 1; // temp inf_values, calculating the total number of interactions from "i" to "j"
received [b + _csv_id_hack] += 1;
//cout << "message from " << a + _csv_id_hack << " to " << b + _csv_id_hack << endl;
}
line_nr++;
}
infile.close();
cout << "File reading finished successfully." << endl;
ofstream outfile ("./experiments-counted/" + dataset_name + "_influenceCounted_" + to_string(N));
if (outfile.good()) {
// Influence value calculated as the ratio of interactions from "i" node to "j" node, to the total number of interactions to the "j" node.
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
//cout << "Influence values calculations, step: " << i*N+(j+1) << "/" << N*N << endl;
if (i == j) {
outfile << i << " " << j << " " << -1 << "\n";
influence[i][j] = -1;
} else if (influence[i][j] > 0) {
if (received[j] != 0) {
influence[i][j] = influence[i][j] / received[j];
} else if (influence[i][j] != 0) {
cout << "Received array error";
}
/*cout << i << "'s influence on " << j << " equals: " << influence[i][j] << endl;*/
outfile << i << " " << j << " " << influence[i][j] << "\n";
} else {
influence[i][j] = 0;
}
}
}
cout << "Compressed file saved successfully." << endl;
outfile.close();
} else {
throw std::invalid_argument("readData - File " + dataset_name + " not saved.");
}
} else {
throw std::invalid_argument("readData - File " + dataset_name + " not found.");
}
return influence;
}
void defineInfluenceArrayAndVectors (string dataset_name, int N, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, string _EXPERIMENT_ID) {
//cout << "File reading started." << endl;
ifstream infile("./experiments-counted/" + dataset_name + "_influenceCounted_" + to_string(N));
if (infile.good()) { // reading the already calculated influence values
int line_nr = 0;
string line;
float last_a = -1;
while (getline(infile, line)) {
cout << "Reading influence file, line nr: " << line_nr << endl;
istringstream iss(line);
float a, b, c;
if (!(iss >> a >> b >> c)) { break; } // error
if (c != 0) {
if (a != last_a) {
inf_row_ptr.push_back(inf_values.size());
//cout << "add row ptr: " << inf_values.size() << endl;
last_a = a;
}
inf_values.push_back(c);
//cout << "add value: " << c << endl;
inf_col_ind.push_back(b);
//cout << "add col ind: " << b << endl;
}
line_nr++;
}
infile.close();
} else { // calculating influence values
infile.close();
vector <vector<float>> influence = readData(dataset_name, N, _EXPERIMENT_ID);
// inf_values, inf_col_ind, inf_row_ptr creation, based on the influence array
for (int i=0; i<N; i++) {
bool added = false;
for (int j=0; j<N; j++) {
//cout << "Influence of " << i << " on " << j << " is equal to: " << influence[i][j] << endl;
if (influence[i][j] != 0) {
if (!added) {
inf_row_ptr.push_back(inf_values.size());
//cout << "add row ptr: " << inf_values.size() << endl;
added = true;
}
inf_values.push_back(influence[i][j]);
//cout << "add value: " << influence[i][j] << endl;
inf_col_ind.push_back(j);
//cout << "add col ind: " << j << endl;
}
}
if (!added) {
//inf_row_ptr.push_back(-1);
}
}
/*cout << "\n\n size of influence array: " << sizeof(influence) + sizeof(float) * influence.capacity() * influence.capacity();
cout << "\n\n Total size of vectors: "
<< sizeof(inf_values) + sizeof(float) * inf_values.capacity()
+ sizeof(inf_col_ind) + sizeof(float) * inf_col_ind.capacity()
+ sizeof(inf_row_ptr) + sizeof(float) * inf_row_ptr.capacity() << "\n\n";*/
}
}
void createPopulation (int nrOfIndividuals, int N, int toFind, vector <vector<int>>& population) {
// creating random individuals within population
for (int i = 0; i<nrOfIndividuals; i++) {
vector<int> row;
population.push_back(row);
cout << "Creating individual " << i << " of " << nrOfIndividuals << endl;
for (int j = 0; j<toFind; j++) {
int rand_id = rand() % N;
bool alreadyAdded = true;
while (alreadyAdded) {
alreadyAdded = false;
for (int k=0; k<population[i].size(); k++) {
if (population[i][k] == rand_id) {
alreadyAdded = true;
rand_id = rand() % N;
}
}
}
//cout << "pushing: " << rand_id << endl;
population[i].push_back(rand_id);
}
}
}
void createPopulationSample (int nrOfIndividuals, int N, int toFind, vector <vector<int>>& population) {
// creating one individual - used as a sample e.g. for GPU vs CPU tests
vector<int> row;
population.push_back(row);
for (int x = 0; x<toFind; x++) {
population[0].push_back(x);
}
}
void setPopulationFitness (vector<vector<int>>& population, int nrOfIndividuals, int N, int inf_values_size, float& INFLUENCE_THRESHOLD, int STEPS_MAX, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int toFind, vector<int>& fitness, int THREADS_PER_BLOCK) {
//bool activeNodesPerIndividual[nrOfIndividuals][N];
bool *dyn_activeNodesPerIndividual = new bool[nrOfIndividuals*N];
for (int i=0; i<nrOfIndividuals; i++) {
for (int j=0; j<N; j++) {
int index = N * i + j;
dyn_activeNodesPerIndividual[index] = false;
}
for (int j=0; j<toFind; j++) {
int index = N * i + population[i][j];
dyn_activeNodesPerIndividual[index] = true;
}
}
int active [nrOfIndividuals];
vector<int> changedIndividuals;
for (int i=0; i<nrOfIndividuals; i++) {
active[i] = toFind;
changedIndividuals.push_back(i);
}
int step_counter = 0;
while (step_counter < STEPS_MAX && changedIndividuals.size() > 0) {
//cout << "Step: " << step_counter << " / " << STEPS_MAX << endl;
int nrOfChangedIndividuals = changedIndividuals.size();
cout << "nrOfChangedIndividuals " << nrOfChangedIndividuals << endl;
F_TIME_START();
InfluenceSpreadPopulationStep (dyn_activeNodesPerIndividual, inf_values, inf_col_ind, inf_row_ptr, N, nrOfChangedIndividuals, inf_values_size, INFLUENCE_THRESHOLD, changedIndividuals);
F_TIME_END("host functions");
changedIndividuals.clear();
int curr_active;
for (int i=0; i<nrOfIndividuals; i++) {
curr_active = 0;
for (int j=0; j<N; j++) {
int index = N * i + j;
if (dyn_activeNodesPerIndividual[index]) {
curr_active++;
}
}
if (curr_active != active[i]) {
changedIndividuals.push_back(i);
}
active[i] = curr_active;
}
step_counter++;
}
for (int i = 0; i < nrOfIndividuals; i++) {
int individualFitness = 0;
for (int j = 0; j < N; j++) {
int index = N * i + j;
if (dyn_activeNodesPerIndividual[index]) {
individualFitness++;
//cout << "Activated " << j << endl;
}
}
//cout << "individualFitness: " << individualFitness << endl;
//cout << "toFind: " << toFind << endl;
// acceptable `error`
/*if (individualFitness-toFind < 0) {
cout << "# Crossover/mutation overlapping" << endl; // can happen because of random crossover and mutation
//coutIndividual(population, i);
}*/
//cout << "fitness Indiv: " << i << ": " << individualFitness-toFind << endl;
fitness.push_back(individualFitness-toFind);
}
}
void performPopulationSelection (vector<vector<int>>& population, int& nrOfIndividuals, int N, int inf_values_size, float& INFLUENCE_THRESHOLD, int& groupSize, int& STEPS_MAX, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int& toFind, int& max_fitness_value, vector<int>& max_fitness_individual, int THREADS_PER_BLOCK) {
vector<int> fitness;
F_TIME_START();
setPopulationFitness(population, nrOfIndividuals, N, inf_values_size, INFLUENCE_THRESHOLD, STEPS_MAX, inf_values, inf_col_ind, inf_row_ptr, toFind, fitness, THREADS_PER_BLOCK);
F_TIME_END("selection - fitness count");
F_TIME_START();
vector<vector<int>> newPopulation;
while (newPopulation.size() != population.size()) {
vector<int> newGroup;
bool alreadyAdded[nrOfIndividuals];
for (int i=0; i<nrOfIndividuals; i++) {
alreadyAdded[i] = false;
}
for (int j=0; j<groupSize; j++) {
int randIndiv = rand() % nrOfIndividuals;
while (alreadyAdded[randIndiv]) {
randIndiv = rand() % nrOfIndividuals;
}
newGroup.push_back(randIndiv);
}
int curr_best_fitness = -1;
int curr_best_id = -1;
int currentFitness = -1;
for (int j=0; j<newGroup.size(); j++) {
currentFitness = fitness[newGroup[j]];
if (currentFitness > curr_best_fitness) {
curr_best_fitness = currentFitness;
curr_best_id = j;
}
}
newPopulation.push_back(population[newGroup[curr_best_id]]);
if (curr_best_fitness > max_fitness_value) {
max_fitness_individual = population[newGroup[curr_best_id]];
max_fitness_value = curr_best_fitness;
}
}
population = newPopulation;
F_TIME_END("selection - population swapping");
}
// TODO performCrossover on DEVICE (nrOfIndividuals/2 threads (from 0 to nr/2 - 1), ids: id*2, id*2+1
void performCrossover (vector<vector<int>>& population, int& nrOfIndividuals, float& crossover_ratio, int& toFind) {
float split_ratio = 0.5;
float split_point = split_ratio*toFind;
int id_first = -1;
int id_second = -1;
for (int i=0; i<nrOfIndividuals; i++) {
int cross = rand() % 100;
if (cross < crossover_ratio * 100) {
if (id_first == -1) {
id_first = i;
} else {
id_second = i;
}
}
if (id_second != -1) {
for (int j=0; j<split_point; j++) {
int temp = population[id_first][j];
population[id_first][j] = population[id_second][j];
population[id_second][j] = temp;
}
id_first = -1;
id_second = -1;
}
} // node ids may end up duplicated within an individual (fitness can become negative)
}
// TODO performMutation on DEVICE
void performMutation (vector<vector<int>>& population, int& nrOfIndividuals, float& mutation_ratio, float& mutation_potency, int& toFind, int N) {
for (int i=0; i<nrOfIndividuals; i++) {
int mutation = rand() % 100;
if (mutation < mutation_ratio * 100) {
for (int j=0; j<mutation_potency*toFind; j++) {
population[i][rand() % toFind] = rand() % N;
}
}
} // node ids may end up duplicated within an individual (fitness can become negative)
}
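// Stop conditions: true once any enabled limit is hit - stagnation relative to the result
// buffered resultBufferSize generations ago (resultMinDiff), the generation limit, the target
// result value, or the wall-clock time limit; a limit of 0 disables that check.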
bool anyLimitReached(int resultBufferSize, float resultMinDiff, vector<int> &resultsBuffer, int generation, int generationsLimit, float timeLimit, int COMPUTATION_START_TIME, int result, int resultLimit) {
int now = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
float diff = (now - COMPUTATION_START_TIME) / 1000.0;
bool anyLimit =
(resultMinDiff > 0 && generation > resultBufferSize && result < resultsBuffer[0] * (1 + resultMinDiff))
|| (generationsLimit > 0 && generation >= generationsLimit)
|| (resultLimit > 0 && result >= resultLimit)
|| (timeLimit > 0 && diff >= timeLimit);
if (generation > 0) {
resultsBuffer.push_back(result);
}
if (generation > resultBufferSize) {
resultsBuffer.erase(resultsBuffer.begin());
//cout << endl << "Current resultsBuffer[0]: " << resultsBuffer[0] << endl;
}
return anyLimit;
}
vector<string> getFileNames (string path) {
DIR *pDIR;
struct dirent *entry;
vector<string> fileNames;
if ((pDIR = opendir(path.c_str())) != NULL) {
while ((entry = readdir(pDIR)) != NULL) {
if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
fileNames.push_back(entry->d_name);
}
}
closedir(pDIR);
}
return fileNames;
}
/* pearson, spearman */
float mean (vector<float> values) {
float sum = 0;
int size = values.size();
for (int i = 0; i < size; i++) {
sum += values[i];
}
return sum / size;
}
float pearson_numerator (vector<float> A, vector<float> B, float meanA, float meanB) {
float numerator = 0;
for (int i = 0; i < A.size(); i++) {
numerator += (A[i] - meanA) * (B[i] - meanB);
}
return numerator;
}
float pearson_denominator (vector<float> A, vector<float> B, float meanA, float meanB) {
float denominator1;
float denominator1_sum = 0;
float denominator2;
float denominator2_sum = 0;
for (int i = 0; i < A.size(); i++) {
denominator1_sum += pow(A[i] - meanA, 2);
}
for (int i = 0; i < B.size(); i++) {
denominator2_sum += pow(B[i] - meanB, 2);
}
denominator1 = pow(denominator1_sum, 0.5);
denominator2 = pow(denominator2_sum, 0.5);
if (denominator1 == 0 || denominator2 == 0)
cout << endl << endl << "##### ERROR: Denominator equal to 0 - probable cause: all result values are equal" << endl << endl;
return denominator1 * denominator2;
}
float pearson (vector<float> A, vector<float> B) {
if (A.size() != B.size()) {
cout << "ERROR - wrong vector lengths" << endl;
return -1;
}
float meanA = mean(A);
float meanB = mean(B);
float numerator = pearson_numerator(A, B, meanA, meanB);
float denominator = pearson_denominator(A, B, meanA, meanB);
return numerator / denominator;
}
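// toRank converts raw values into ranks for the Spearman correlation; tied values receive
// the average (fractional) rank of the positions they occupy in the sorted order.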
vector<float> toRank (vector<float> A) {
vector<float> sorted = A;
sort(sorted.begin(), sorted.end());
vector<float> rank;
for (int i = 0; i < A.size(); i++) {
vector<int> positions;
for (int j = 0; j < A.size(); j++) {
if (sorted[j] == A[i]) {
positions.push_back(j);
}
}
float sum = 0;
float avg;
for (int j = 0; j < positions.size(); j++) {
sum += positions[j] + 1;
}
avg = sum / positions.size();
rank.push_back(avg);
//rank.push_back(positions[positions.size()-1] + 1); //libreoffice calc rank
}
/*
cout << "Ranking: " << endl;
for (int i = 0; i < rank.size(); i++) {
cout << rank[i] << ", ";
}
cout << endl << endl;
*/
return rank;
}
float spearman (vector<float> A, vector<float> B) {
vector<float> A_ranked = toRank(A);
vector<float> B_ranked = toRank(B);
return pearson(A_ranked, B_ranked);
}
int main (int argc, char* argv[]) {
srand (time(NULL));
coutGPUStatus();
string _EXPERIMENT_ID = argv[1];
int tests = 100;
float timeLimit = 6; //seconds
int generationsLimit = 0; //5;
int resultLimit = 0; //32;
int resultBufferSize = 10;
float resultMinDiff = 0; //0.01;
bool saveResults = true;
bool saveResultsCorrelation = true;
float INFLUENCE_THRESHOLD = 0.5;
int N_MAX = 1000;
int STEPS_MAX = 10000;
int TO_FIND_PERCENTAGE = 5;
int THREADS_PER_BLOCK = 1024;
/* Parameters */
//int groupSize = 20; // 10, 20, 30 // 2, 5, 10, 20, 50
//int nrOfIndividuals = (int)ceil(N/10.0); // N/20, N/10, N/5 // 100, 500 1k, 2k, 10k
//float crossover_ratio = 0.7; // 0.5, 0.7, 0.9 // 0.1, 0.3, 0.5, 0.7, 0.9
//float mutation_potency = 0.01; // 0.001, 0.01, 0.1 // 0.01, 0.02, 0.05, 0.1, 0.2
//float mutation_ratio = 0.9; // 0.75, 0.9, 0.95, // 0.1, 0.3, 0.5, 0.7, 0.9
vector<int> a_groupSize {10, 20, 30}; // 10, 20, 30
vector<int> a_nrOfIndividuals {12, 10, 8}; // N/12, N/10, N/8
vector<float> a_crossover_ratio {0.6, 0.7, 0.8}; // 0.6, 0.7, 0.8
vector<float> a_mutation_potency {0.001, 0.01, 0.1}; // 0.001, 0.01, 0.1
vector<float> a_mutation_ratio {0.7, 0.8, 0.9}; // 0.7, 0.8, 0.9
int parameters_sets = a_groupSize.size() * a_nrOfIndividuals.size() * a_crossover_ratio.size() * a_mutation_potency.size() * a_mutation_ratio.size();
vector<string> datasets = getFileNames("./experiments_" + _EXPERIMENT_ID);
/* DEBUG */
int debug_nrOfIndividuals;
bool debug = true;
if (debug) {
tests = 10;
N_MAX = 1000;
THREADS_PER_BLOCK = 1024;
debug_nrOfIndividuals = -1; // -1 - behaves as in non-debug mode (divides N by a_nrOfIndividuals to get the number of individuals)
// tests: 10, debug_nrOfIndividuals: -1, generationsLimit: 1, THREADS_PER_BLOCK: 1024, default parameters, facebook
/* 100: 7 in 1ms, 500: 46 in 10ms, 1000: 88 in 53ms */
timeLimit = 0;
generationsLimit = 5; // 5 - 80s
resultLimit = 0;
resultMinDiff = 0;
saveResults = true;//false;
saveResultsCorrelation = true;//false;
a_groupSize = {20};
a_nrOfIndividuals = {8};
a_crossover_ratio = {0.7};
a_mutation_potency = {0.01};
a_mutation_ratio = {0.9};
parameters_sets = a_groupSize.size() * a_nrOfIndividuals.size() * a_crossover_ratio.size() * a_mutation_potency.size() * a_mutation_ratio.size();
//datasets = {"facebook-46952"};
//datasets = {"BA-1000-1-3.csv"};
datasets = {"ER-1000-0.05-10.csv"};
//datasets = getFileNames("./experiments_" + _EXPERIMENT_ID);
}
/*
N = 1000
INDIVIDUALS = 1000
THREADS_PER_BLOCK = 192
1 individuals - 0.056s
10 individuals - 0.081s
100 individuals - 0.265s
1000 individuals - 2.483s
THREADS_PER_BLOCK = 512
1000 individuals - 2.423s
THREADS_PER_BLOCK = 1024
1000 individuals - 2.481s
N = max (~47k for facebook)
THREADS_PER_BLOCK = 512
100 individuals - 5.08s
*/
vector<vector<float>> results;
for (int i=0; i<datasets.size(); i++) {
vector<float> row(parameters_sets, -1);
results.push_back(row);
}
for (int file_id=0; file_id<datasets.size(); file_id++) {
int dataset_id = file_id; //TODO to refactor
string dataset_name = datasets[file_id];
stringstream ssname(dataset_name);
string token;
getline(ssname, token, '-');
getline(ssname, token, '-');
int maxSize = stoi(token);
int N = min(N_MAX, maxSize);
int toFind = (int)ceil(float(TO_FIND_PERCENTAGE * N) / 100.0);
// using ofstream constructors.
std::ofstream outfile("results_" + dataset_name + "_" + _EXPERIMENT_ID + "_" + ".xls");
if (saveResults) {
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Dataset</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Test nr</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>groupSize</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>nrOfIndividuals</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>crossover_ratio</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>mutation_potency</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>mutation_ratio</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Generations</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Result</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
vector <float> inf_col_ind;
vector <float> inf_row_ptr;
vector <float> inf_values;
defineInfluenceArrayAndVectors(dataset_name, N, inf_values, inf_col_ind, inf_row_ptr, _EXPERIMENT_ID);
int inf_values_size = inf_values.size();
int parameters_set = 1;
for_each(a_groupSize.begin(), a_groupSize.end(), [&] (int groupSize) {
for_each(a_nrOfIndividuals.begin(), a_nrOfIndividuals.end(), [&] (int nrOfIndividualsRaw) {
int nrOfIndividuals = (int)ceil((float)N / nrOfIndividualsRaw); // float division so the ceil is not lost to integer truncation
if (debug && debug_nrOfIndividuals != -1) {
nrOfIndividuals = debug_nrOfIndividuals;
}
for_each(a_crossover_ratio.begin(), a_crossover_ratio.end(), [&] (float crossover_ratio) {
for_each(a_mutation_potency.begin(), a_mutation_potency.end(), [&] (float mutation_potency) {
for_each(a_mutation_ratio.begin(), a_mutation_ratio.end(), [&] (float mutation_ratio) {
float testsResultsSum = 0;
float testsGenerationsSum = 0;
float testsTimeSum = 0;
for (int test = 0; test < tests; test++) {
vector <int> max_fitness_individual;
vector <vector<int>> population;
int max_fitness_value = -1;
int progressBarLength = 10;
int generation = 0;
vector<int> resultsBuffer;
createPopulation(nrOfIndividuals, N, toFind, population);
//createPopulationSample(nrOfIndividuals, N, toFind, population);
//coutPopulation(population);
int COMPUTATION_START_TIME = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
while (!anyLimitReached(resultBufferSize, resultMinDiff, resultsBuffer, generation, generationsLimit, timeLimit, COMPUTATION_START_TIME, max_fitness_value, resultLimit)) {
//coutGPUStatus();
F_TIME_START();
performPopulationSelection(population, nrOfIndividuals, N, inf_values_size, INFLUENCE_THRESHOLD, groupSize, STEPS_MAX, inf_values, inf_col_ind, inf_row_ptr, toFind, max_fitness_value, max_fitness_individual, THREADS_PER_BLOCK);
F_TIME_END("selection");
F_TIME_START();
performCrossover(population, nrOfIndividuals, crossover_ratio, toFind);
F_TIME_END("crossover");
F_TIME_START();
performMutation(population, nrOfIndividuals, mutation_ratio, mutation_potency, toFind, N);
F_TIME_END("mutation");
//coutResult(generation, max_fitness_value);
generation++;
}
int COMPUTATION_END_TIME = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
int COMPUTATION_DURATION = COMPUTATION_END_TIME - COMPUTATION_START_TIME;
cout << endl << "[FINISHED] test: " << test+1 << "/" << tests
<< " for parameters set nr: " << parameters_set << "/" << parameters_sets
<< " for dataset_id: " << dataset_id+1 << "/" << datasets.size()
<< " in: " << COMPUTATION_DURATION / 1000.0 << "s";
cout << endl;
coutGPUStatus();
cout << endl;
if (saveResults) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(parameters_set) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(test+1) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(groupSize) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(nrOfIndividuals) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(crossover_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_potency) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(generation) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(max_fitness_value) + "</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
//cout << endl << "result " << test+1 << ": " << max_fitness_value << endl;
testsResultsSum += max_fitness_value;
testsGenerationsSum += generation;
testsTimeSum += COMPUTATION_DURATION;
/*cout << "Best individual found: " << endl;
for (int i=0; i<max_fitness_individual.size(); i++) {
cout << max_fitness_individual[i] << ", ";
}*/
//cout << endl << endl << "This group can activate " << max_fitness_value << " others";
//cout << endl << "Time elapsed: " << (time2 - COMPUTATION_START_TIME) / 1000.0 << "s" << endl;
} // TEST
float finalResult = std::round(testsResultsSum / tests);
float finalGenerations = std::round(testsGenerationsSum / tests);
float finalTime = std::round(testsTimeSum / tests);
cout << endl << "Final result avg: " << finalResult << " in avg " << finalTime / 1000.0 << "s" << endl;
results[file_id][parameters_set-1] = finalResult;
if (saveResults) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(parameters_set) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>AVG </Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(groupSize) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(nrOfIndividuals) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(crossover_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_potency) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(finalGenerations) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(finalResult) + "</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
parameters_set++;
});
});
});
});
});
if (saveResults) {
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
}
outfile.close();
}
cout << endl << endl << "*** RESULTS ***" << endl;
for (int i=0; i<datasets.size(); i++) {
for (int j=0; j<parameters_sets; j++) {
cout << results[i][j] << ", ";
}
cout << endl;
}
if (saveResultsCorrelation) {
// using ofstream constructors.
std::ofstream outfile("results_correlation_" + _EXPERIMENT_ID + "_.xls");
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasets.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(pearson(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasets.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(spearman(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
outfile.close();
}
return 0;
}
|
5e2ada978eaa1917e75e5bd04d4b55e213250a5c.cu
|
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <chrono>
#include <dirent.h>
using namespace std::chrono;
using namespace std;
vector<int> G_timestamps;
int getCurrentTime () {
return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}
void F_TIME_START () {
G_timestamps.push_back(getCurrentTime());
}
void F_TIME_END (string measuredName) {
int start = G_timestamps.back();
int end = getCurrentTime();
float diff = (end - start) / 1000.0;
G_timestamps.pop_back();
cout << endl << "## [" << measuredName << "]: " << diff << "s" << endl << endl;
}
void coutGPUStatus () {
size_t freem, totalm;
float free_m, total_m, used_m;
cudaMemGetInfo((size_t*)&freem, (size_t*)&totalm);
free_m = (size_t) freem / 1048576.0;
total_m = (size_t) totalm / 1048576.0;
used_m = total_m - free_m;
printf ( "## Total: %f MB. Used %f MB. Free: %f MB. \n", total_m, used_m, free_m);
}
void coutResult(int& generation, int& max_fitness_value) {
cout << "Generation " << generation << ", currently best individual can activate " << max_fitness_value << " others" << endl;
}
void coutPopulation (vector <vector<int>>& population) {
cout << "Population:";
for (int i=0; i<population.size(); i++) {
cout << "\nIndiv: " << i << ": ";
for (int j=0; j<population[i].size(); j++) {
if (population[i][j] < 10) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 100) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 1000) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 10000) {
cout << population[i][j] << ", ";
}
else {
cout << population[i][j] << ",";
}
}
}
cout << "\n\n";
}
void coutIndividual (vector <vector<int>>& population, int i) {
cout << "Individual " << i << ":";
for (int j=0; j<population[i].size(); j++) {
if (population[i][j] < 10) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 100) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 1000) {
cout << population[i][j] << ", ";
}
else if (population[i][j] < 10000) {
cout << population[i][j] << ", ";
}
else {
cout << population[i][j] << ",";
}
}
cout << "\n\n";
}
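// Looks up the influence of node x on node y in the CSR-style arrays: inf_values holds the
// non-zero weights, inf_col_ind the target columns, inf_row_ptr the start of each row's slice.
// Note that the upper bound for the last row is inf_values_size-1, so the final stored entry
// is never examined - this appears to be an off-by-one.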
float getInfluenceValue (int N, int inf_values_size, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int x, int y) {
float infValue = 0;
int min = inf_row_ptr[x];
int max = x == N-1 ? inf_values_size-1 : inf_row_ptr[x+1]; //inf_values_size-1
for (int i=min; i<max; i++) {
if (inf_col_ind[i] == y) {
infValue = inf_values[i];
break;
}
}
return infValue;
}
void InfluenceSpreadPopulationStep (bool *dyn_activeNodesPerIndividual, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int N, int nrOfChangedIndividuals, int inf_values_size, float INFLUENCE_THRESHOLD, vector<int>& changedIndividuals) {
for (int indiv_id = 0; indiv_id < nrOfChangedIndividuals; indiv_id++) {
for (int node_id = 0; node_id < N; node_id++) {
int indiv_index = changedIndividuals[indiv_id];
float infValue = 0; // total value of influence on the node
for (int i=0; i<N; i++) {
if (dyn_activeNodesPerIndividual[indiv_index * N + i] && node_id != i) { // if i-th element is active and is not the node
float result = getInfluenceValue(N, inf_values_size, inf_values, inf_col_ind, inf_row_ptr, i, node_id);
infValue += result; // add i-th element influence on the node
//printf("Influence %d on %d is: %f\n", i, node_id, result);
//printf("\ninfValue: %f, id: %d", infValue, id);
}
}
//printf("\ninfValue: %f, id: %d", infValue, id);
if (infValue >= INFLUENCE_THRESHOLD) { // if total influence on the node is greater than or equal to the INFLUENCE_THRESHOLD value
dyn_activeNodesPerIndividual[indiv_index * N + node_id] = true; // activate the node
}
}
}
}
vector <vector<float>> readData (string dataset_name, int N, string _EXPERIMENT_ID) {
vector <vector<float>> influence;
// initialization of the influence vector
for (int i=0; i<N; i++) {
cout << endl << i << " out of " << N << endl;
vector<float> row(N, 0);
influence.push_back(row);
if ((i + 1) * N % (N * N / 10) == 0) {
cout << "[Initialization of the influence matrix]: " << float((i + 1) * N) / (N * N) * 100 << "%" << endl;
}
}
// total number of interactions received by every node
vector<float> received(N, 0);
ifstream infile("./experiments_" + _EXPERIMENT_ID + "/" + dataset_name);
string line;
int _csv_id_hack = -1;
if (dataset_name.find(".csv") != std::string::npos) {
_csv_id_hack = 0;
}
if (infile.good()) {
int line_nr = 0;
while (getline(infile, line)) {
cout << "Reading raw data file, line nr: " << line_nr << endl;
//cout << line << endl;
istringstream iss(line);
int a, b;
if (!(iss >> a >> b)) { cout << "ERROR" << endl; break; } // error
if (a != b && a + _csv_id_hack < N && b + _csv_id_hack < N) {
influence[a + _csv_id_hack][b + _csv_id_hack] += 1; // temp inf_values, calculating the total number of interactions from "i" to "j"
received [b + _csv_id_hack] += 1;
//cout << "message from " << a + _csv_id_hack << " to " << b + _csv_id_hack << endl;
}
line_nr++;
}
infile.close();
cout << "File reading finished successfully." << endl;
ofstream outfile ("./experiments-counted/" + dataset_name + "_influenceCounted_" + to_string(N));
if (outfile.good()) {
// Influence value calculated as the ratio of interactions from node "i" to node "j" to the total number of interactions received by node "j".
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
//cout << "Influence values calculations, step: " << i*N+(j+1) << "/" << N*N << endl;
if (i == j) {
outfile << i << " " << j << " " << -1 << "\n";
influence[i][j] = -1;
} else if (influence[i][j] > 0) {
if (received[j] != 0) {
influence[i][j] = influence[i][j] / received[j];
} else if (influence[i][j] != 0) {
cout << "Received array error";
}
/*cout << i << "'s influence on " << j << " equals: " << influence[i][j] << endl;*/
outfile << i << " " << j << " " << influence[i][j] << "\n";
} else {
influence[i][j] = 0;
}
}
}
cout << "Compressed file saved successfully." << endl;
outfile.close();
} else {
throw std::invalid_argument("readData - File " + dataset_name + " not saved.");
}
} else {
throw std::invalid_argument("readData - File " + dataset_name + " not found.");
}
return influence;
}
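// Builds the CSR-style influence representation: it either reloads a previously saved
// "<dataset>_influenceCounted_<N>" file or calls readData() and compresses the dense matrix.
// Rows without any non-zero entry get no inf_row_ptr entry (the push_back(-1) is commented
// out), so the lookup in getInfluenceValue effectively assumes every node has at least one
// outgoing edge.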
void defineInfluenceArrayAndVectors (string dataset_name, int N, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, string _EXPERIMENT_ID) {
//cout << "File reading started." << endl;
ifstream infile("./experiments-counted/" + dataset_name + "_influenceCounted_" + to_string(N));
if (infile.good()) { // reading the already calculated influence values
int line_nr = 0;
string line;
float last_a = -1;
while (getline(infile, line)) {
cout << "Reading influence file, line nr: " << line_nr << endl;
istringstream iss(line);
float a, b, c;
if (!(iss >> a >> b >> c)) { break; } // error
if (c != 0) {
if (a != last_a) {
inf_row_ptr.push_back(inf_values.size());
//cout << "add row ptr: " << inf_values.size() << endl;
last_a = a;
}
inf_values.push_back(c);
//cout << "add value: " << c << endl;
inf_col_ind.push_back(b);
//cout << "add col ind: " << b << endl;
}
line_nr++;
}
infile.close();
} else { // calculating influence values
infile.close();
vector <vector<float>> influence = readData(dataset_name, N, _EXPERIMENT_ID);
// inf_values, inf_col_ind, inf_row_ptr creation, based on the influence array
for (int i=0; i<N; i++) {
bool added = false;
for (int j=0; j<N; j++) {
//cout << "Influence of " << i << " on " << j << " is equal to: " << influence[i][j] << endl;
if (influence[i][j] != 0) {
if (!added) {
inf_row_ptr.push_back(inf_values.size());
//cout << "add row ptr: " << inf_values.size() << endl;
added = true;
}
inf_values.push_back(influence[i][j]);
//cout << "add value: " << influence[i][j] << endl;
inf_col_ind.push_back(j);
//cout << "add col ind: " << j << endl;
}
}
if (!added) {
//inf_row_ptr.push_back(-1);
}
}
/*cout << "\n\n size of influence array: " << sizeof(influence) + sizeof(float) * influence.capacity() * influence.capacity();
cout << "\n\n Total size of vectors: "
<< sizeof(inf_values) + sizeof(float) * inf_values.capacity()
+ sizeof(inf_col_ind) + sizeof(float) * inf_col_ind.capacity()
+ sizeof(inf_row_ptr) + sizeof(float) * inf_row_ptr.capacity() << "\n\n";*/
}
}
void createPopulation (int nrOfIndividuals, int N, int toFind, vector <vector<int>>& population) {
// creating random individuals within population
for (int i = 0; i<nrOfIndividuals; i++) {
vector<int> row;
population.push_back(row);
cout << "Creating individual " << i << " of " << nrOfIndividuals << endl;
for (int j = 0; j<toFind; j++) {
int rand_id = rand() % N;
bool alreadyAdded = true;
while (alreadyAdded) {
alreadyAdded = false;
for (int k=0; k<population[i].size(); k++) {
if (population[i][k] == rand_id) {
alreadyAdded = true;
rand_id = rand() % N;
}
}
}
//cout << "pushing: " << rand_id << endl;
population[i].push_back(rand_id);
}
}
}
void createPopulationSample (int nrOfIndividuals, int N, int toFind, vector <vector<int>>& population) {
// creating one individual - used as a sample e.g. for GPU vs CPU tests
vector<int> row;
population.push_back(row);
for (int x = 0; x<toFind; x++) {
population[0].push_back(x);
}
}
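// Fitness evaluation: each individual's seed nodes are marked active, the influence spread
// is simulated until no individual changes (or STEPS_MAX is reached), and the fitness is the
// final number of active nodes minus the seed-set size toFind.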
void setPopulationFitness (vector<vector<int>>& population, int nrOfIndividuals, int N, int inf_values_size, float& INFLUENCE_THRESHOLD, int STEPS_MAX, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int toFind, vector<int>& fitness, int THREADS_PER_BLOCK) {
//bool activeNodesPerIndividual[nrOfIndividuals][N];
bool *dyn_activeNodesPerIndividual = new bool[nrOfIndividuals*N];
for (int i=0; i<nrOfIndividuals; i++) {
for (int j=0; j<N; j++) {
int index = N * i + j;
dyn_activeNodesPerIndividual[index] = false;
}
for (int j=0; j<toFind; j++) {
int index = N * i + population[i][j];
dyn_activeNodesPerIndividual[index] = true;
}
}
int active [nrOfIndividuals];
vector<int> changedIndividuals;
for (int i=0; i<nrOfIndividuals; i++) {
active[i] = toFind;
changedIndividuals.push_back(i);
}
int step_counter = 0;
while (step_counter < STEPS_MAX && changedIndividuals.size() > 0) {
//cout << "Step: " << step_counter << " / " << STEPS_MAX << endl;
int nrOfChangedIndividuals = changedIndividuals.size();
cout << "nrOfChangedIndividuals " << nrOfChangedIndividuals << endl;
F_TIME_START();
InfluenceSpreadPopulationStep (dyn_activeNodesPerIndividual, inf_values, inf_col_ind, inf_row_ptr, N, nrOfChangedIndividuals, inf_values_size, INFLUENCE_THRESHOLD, changedIndividuals);
F_TIME_END("host functions");
changedIndividuals.clear();
int curr_active;
for (int i=0; i<nrOfIndividuals; i++) {
curr_active = 0;
for (int j=0; j<N; j++) {
int index = N * i + j;
if (dyn_activeNodesPerIndividual[index]) {
curr_active++;
}
}
if (curr_active != active[i]) {
changedIndividuals.push_back(i);
}
active[i] = curr_active;
}
step_counter++;
}
for (int i = 0; i < nrOfIndividuals; i++) {
int individualFitness = 0;
for (int j = 0; j < N; j++) {
int index = N * i + j;
if (dyn_activeNodesPerIndividual[index]) {
individualFitness++;
//cout << "Activated " << j << endl;
}
}
//cout << "individualFitness: " << individualFitness << endl;
//cout << "toFind: " << toFind << endl;
// acceptable `error`
/*if (individualFitness-toFind < 0) {
cout << "# Crossover/mutation overlapping" << endl; // can happen because of random crossover and mutation
//coutIndividual(population, i);
}*/
//cout << "fitness Indiv: " << i << ": " << individualFitness-toFind << endl;
fitness.push_back(individualFitness-toFind);
}
}
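// Tournament selection: after fitness evaluation, groups of `groupSize` distinct random
// individuals are drawn and the fittest member of each group is copied into the new
// population until it matches the old population size; the best individual seen so far is
// tracked in max_fitness_individual / max_fitness_value.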
void performPopulationSelection (vector<vector<int>>& population, int& nrOfIndividuals, int N, int inf_values_size, float& INFLUENCE_THRESHOLD, int& groupSize, int& STEPS_MAX, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int& toFind, int& max_fitness_value, vector<int>& max_fitness_individual, int THREADS_PER_BLOCK) {
vector<int> fitness;
F_TIME_START();
setPopulationFitness(population, nrOfIndividuals, N, inf_values_size, INFLUENCE_THRESHOLD, STEPS_MAX, inf_values, inf_col_ind, inf_row_ptr, toFind, fitness, THREADS_PER_BLOCK);
F_TIME_END("selection - fitness count");
F_TIME_START();
vector<vector<int>> newPopulation;
while (newPopulation.size() != population.size()) {
vector<int> newGroup;
bool alreadyAdded[nrOfIndividuals];
for (int i=0; i<nrOfIndividuals; i++) {
alreadyAdded[i] = false;
}
for (int j=0; j<groupSize; j++) {
int randIndiv = rand() % nrOfIndividuals;
while (alreadyAdded[randIndiv]) {
randIndiv = rand() % nrOfIndividuals;
}
alreadyAdded[randIndiv] = true; // mark as taken so tournament members are distinct
newGroup.push_back(randIndiv);
}
int curr_best_fitness = -1;
int curr_best_id = -1;
int currentFitness = -1;
for (int j=0; j<newGroup.size(); j++) {
currentFitness = fitness[newGroup[j]];
if (currentFitness > curr_best_fitness) {
curr_best_fitness = currentFitness;
curr_best_id = j;
}
}
newPopulation.push_back(population[newGroup[curr_best_id]]);
if (curr_best_fitness > max_fitness_value) {
max_fitness_individual = population[newGroup[curr_best_id]];
max_fitness_value = curr_best_fitness;
}
}
population = newPopulation;
F_TIME_END("selection - population swapping");
}
// TODO performCrossover on DEVICE (nrOfIndividuals/2 threads (from 0 to nr/2 - 1), ids: id*2, id*2+1
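// Single-point crossover: individuals selected with probability crossover_ratio are paired
// in order of selection; each pair swaps the first half of its node ids (split_point = 0.5 * toFind).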
void performCrossover (vector<vector<int>>& population, int& nrOfIndividuals, float& crossover_ratio, int& toFind) {
float split_ratio = 0.5;
float split_point = split_ratio*toFind;
int id_first = -1;
int id_second = -1;
for (int i=0; i<nrOfIndividuals; i++) {
int cross = rand() % 100;
if (cross < crossover_ratio * 100) {
if (id_first == -1) {
id_first = i;
} else {
id_second = i;
}
}
if (id_second != -1) {
for (int j=0; j<split_point; j++) {
int temp = population[id_first][j];
population[id_first][j] = population[id_second][j];
population[id_second][j] = temp;
}
id_first = -1;
id_second = -1;
}
} // node ids may end up duplicated within an individual (fitness can become negative)
}
// TODO performMutation on DEVICE
void performMutation (vector<vector<int>>& population, int& nrOfIndividuals, float& mutation_ratio, float& mutation_potency, int& toFind, int N) {
for (int i=0; i<nrOfIndividuals; i++) {
int mutation = rand() % 100;
if (mutation < mutation_ratio * 100) {
for (int j=0; j<mutation_potency*toFind; j++) {
population[i][rand() % toFind] = rand() % N;
}
}
} // node ids may end up duplicated within an individual (fitness can become negative)
}
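// Stop conditions: true once any enabled limit is hit - stagnation relative to the result
// buffered resultBufferSize generations ago (resultMinDiff), the generation limit, the target
// result value, or the wall-clock time limit; a limit of 0 disables that check.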
bool anyLimitReached(int resultBufferSize, float resultMinDiff, vector<int> &resultsBuffer, int generation, int generationsLimit, float timeLimit, int COMPUTATION_START_TIME, int result, int resultLimit) {
int now = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
float diff = (now - COMPUTATION_START_TIME) / 1000.0;
bool anyLimit =
(resultMinDiff > 0 && generation > resultBufferSize && result < resultsBuffer[0] * (1 + resultMinDiff))
|| (generationsLimit > 0 && generation >= generationsLimit)
|| (resultLimit > 0 && result >= resultLimit)
|| (timeLimit > 0 && diff >= timeLimit);
if (generation > 0) {
resultsBuffer.push_back(result);
}
if (generation > resultBufferSize) {
resultsBuffer.erase(resultsBuffer.begin());
//cout << endl << "Current resultsBuffer[0]: " << resultsBuffer[0] << endl;
}
return anyLimit;
}
vector<string> getFileNames (string path) {
DIR *pDIR;
struct dirent *entry;
vector<string> fileNames;
if ((pDIR = opendir(path.c_str())) != NULL) {
while ((entry = readdir(pDIR)) != NULL) {
if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
fileNames.push_back(entry->d_name);
}
}
closedir(pDIR);
}
return fileNames;
}
/* pearson, spearman */
float mean (vector<float> values) {
float sum = 0;
int size = values.size();
for (int i = 0; i < size; i++) {
sum += values[i];
}
return sum / size;
}
float pearson_numerator (vector<float> A, vector<float> B, float meanA, float meanB) {
float numerator = 0;
for (int i = 0; i < A.size(); i++) {
numerator += (A[i] - meanA) * (B[i] - meanB);
}
return numerator;
}
float pearson_denominator (vector<float> A, vector<float> B, float meanA, float meanB) {
float denominator1;
float denominator1_sum = 0;
float denominator2;
float denominator2_sum = 0;
for (int i = 0; i < A.size(); i++) {
denominator1_sum += pow(A[i] - meanA, 2);
}
for (int i = 0; i < B.size(); i++) {
denominator2_sum += pow(B[i] - meanB, 2);
}
denominator1 = pow(denominator1_sum, 0.5);
denominator2 = pow(denominator2_sum, 0.5);
if (denominator1 == 0 || denominator2 == 0)
cout << endl << endl << "##### ERROR: Denominator equal to 0 - probable cause: all result values are equal" << endl << endl;
return denominator1 * denominator2;
}
float pearson (vector<float> A, vector<float> B) {
if (A.size() != B.size()) {
cout << "ERROR - wrong vector lengths" << endl;
return -1;
}
float meanA = mean(A);
float meanB = mean(B);
float numerator = pearson_numerator(A, B, meanA, meanB);
float denominator = pearson_denominator(A, B, meanA, meanB);
return numerator / denominator;
}
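// toRank converts raw values into ranks for the Spearman correlation; tied values receive
// the average (fractional) rank of the positions they occupy in the sorted order.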
vector<float> toRank (vector<float> A) {
vector<float> sorted = A;
sort(sorted.begin(), sorted.end());
vector<float> rank;
for (int i = 0; i < A.size(); i++) {
vector<int> positions;
for (int j = 0; j < A.size(); j++) {
if (sorted[j] == A[i]) {
positions.push_back(j);
}
}
float sum = 0;
float avg;
for (int j = 0; j < positions.size(); j++) {
sum += positions[j] + 1;
}
avg = sum / positions.size();
rank.push_back(avg);
//rank.push_back(positions[positions.size()-1] + 1); //libreoffice calc rank
}
/*
cout << "Ranking: " << endl;
for (int i = 0; i < rank.size(); i++) {
cout << rank[i] << ", ";
}
cout << endl << endl;
*/
return rank;
}
float spearman (vector<float> A, vector<float> B) {
vector<float> A_ranked = toRank(A);
vector<float> B_ranked = toRank(B);
return pearson(A_ranked, B_ranked);
}
int main (int argc, char* argv[]) {
srand (time(NULL));
coutGPUStatus();
string _EXPERIMENT_ID = argv[1];
int tests = 100;
float timeLimit = 6; //seconds
int generationsLimit = 0; //5;
int resultLimit = 0; //32;
int resultBufferSize = 10;
float resultMinDiff = 0; //0.01;
bool saveResults = true;
bool saveResultsCorrelation = true;
float INFLUENCE_THRESHOLD = 0.5;
int N_MAX = 1000;
int STEPS_MAX = 10000;
int TO_FIND_PERCENTAGE = 5;
int THREADS_PER_BLOCK = 1024;
/* Parameters */
//int groupSize = 20; // 10, 20, 30 // 2, 5, 10, 20, 50
//int nrOfIndividuals = (int)ceil(N/10.0); // N/20, N/10, N/5 // 100, 500 1k, 2k, 10k
//float crossover_ratio = 0.7; // 0.5, 0.7, 0.9 // 0.1, 0.3, 0.5, 0.7, 0.9
//float mutation_potency = 0.01; // 0.001, 0.01, 0.1 // 0.01, 0.02, 0.05, 0.1, 0.2
//float mutation_ratio = 0.9; // 0.75, 0.9, 0.95, // 0.1, 0.3, 0.5, 0.7, 0.9
vector<int> a_groupSize {10, 20, 30}; // 10, 20, 30
vector<int> a_nrOfIndividuals {12, 10, 8}; // N/12, N/10, N/8
vector<float> a_crossover_ratio {0.6, 0.7, 0.8}; // 0.6, 0.7, 0.8
vector<float> a_mutation_potency {0.001, 0.01, 0.1}; // 0.001, 0.01, 0.1
vector<float> a_mutation_ratio {0.7, 0.8, 0.9}; // 0.7, 0.8, 0.9
int parameters_sets = a_groupSize.size() * a_nrOfIndividuals.size() * a_crossover_ratio.size() * a_mutation_potency.size() * a_mutation_ratio.size();
vector<string> datasets = getFileNames("./experiments_" + _EXPERIMENT_ID);
/* DEBUG */
int debug_nrOfIndividuals;
bool debug = true;
if (debug) {
tests = 10;
N_MAX = 1000;
THREADS_PER_BLOCK = 1024;
debug_nrOfIndividuals = -1; // -1 - behaves as in non-debug mode (divides N by a_nrOfIndividuals to get the number of individuals)
// tests: 10, debug_nrOfIndividuals: -1, generationsLimit: 1, THREADS_PER_BLOCK: 1024, default parameters, facebook
/* 100: 7 in 1ms, 500: 46 in 10ms, 1000: 88 in 53ms */
timeLimit = 0;
generationsLimit = 5; // 5 - 80s
resultLimit = 0;
resultMinDiff = 0;
saveResults = true;//false;
saveResultsCorrelation = true;//false;
a_groupSize = {20};
a_nrOfIndividuals = {8};
a_crossover_ratio = {0.7};
a_mutation_potency = {0.01};
a_mutation_ratio = {0.9};
parameters_sets = a_groupSize.size() * a_nrOfIndividuals.size() * a_crossover_ratio.size() * a_mutation_potency.size() * a_mutation_ratio.size();
//datasets = {"facebook-46952"};
//datasets = {"BA-1000-1-3.csv"};
datasets = {"ER-1000-0.05-10.csv"};
//datasets = getFileNames("./experiments_" + _EXPERIMENT_ID);
}
/*
N = 1000
INDIVIDUALS = 1000
THREADS_PER_BLOCK = 192
1 individuals - 0.056s
10 individuals - 0.081s
100 individuals - 0.265s
1000 individuals - 2.483s
THREADS_PER_BLOCK = 512
1000 individuals - 2.423s
THREADS_PER_BLOCK = 1024
1000 individuals - 2.481s
N = max (~47k for facebook)
THREADS_PER_BLOCK = 512
100 individuals - 5.08s
*/
vector<vector<float>> results;
for (int i=0; i<datasets.size(); i++) {
vector<float> row(parameters_sets, -1);
results.push_back(row);
}
for (int file_id=0; file_id<datasets.size(); file_id++) {
int dataset_id = file_id; //TODO to refactor
string dataset_name = datasets[file_id];
stringstream ssname(dataset_name);
string token;
getline(ssname, token, '-');
getline(ssname, token, '-');
int maxSize = stoi(token);
int N = min(N_MAX, maxSize);
int toFind = (int)ceil(float(TO_FIND_PERCENTAGE * N) / 100.0);
// using ofstream constructors.
std::ofstream outfile("results_" + dataset_name + "_" + _EXPERIMENT_ID + "_" + ".xls");
if (saveResults) {
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Dataset</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Test nr</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>groupSize</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>nrOfIndividuals</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>crossover_ratio</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>mutation_potency</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>mutation_ratio</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Generations</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Result</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
vector <float> inf_col_ind;
vector <float> inf_row_ptr;
vector <float> inf_values;
defineInfluenceArrayAndVectors(dataset_name, N, inf_values, inf_col_ind, inf_row_ptr, _EXPERIMENT_ID);
int inf_values_size = inf_values.size();
int parameters_set = 1;
for_each(a_groupSize.begin(), a_groupSize.end(), [&] (int groupSize) {
for_each(a_nrOfIndividuals.begin(), a_nrOfIndividuals.end(), [&] (int nrOfIndividualsRaw) {
int nrOfIndividuals = (int)ceil((float)N / nrOfIndividualsRaw); // float division so the ceil is not lost to integer truncation
if (debug && debug_nrOfIndividuals != -1) {
nrOfIndividuals = debug_nrOfIndividuals;
}
for_each(a_crossover_ratio.begin(), a_crossover_ratio.end(), [&] (float crossover_ratio) {
for_each(a_mutation_potency.begin(), a_mutation_potency.end(), [&] (float mutation_potency) {
for_each(a_mutation_ratio.begin(), a_mutation_ratio.end(), [&] (float mutation_ratio) {
float testsResultsSum = 0;
float testsGenerationsSum = 0;
float testsTimeSum = 0;
for (int test = 0; test < tests; test++) {
vector <int> max_fitness_individual;
vector <vector<int>> population;
int max_fitness_value = -1;
int progressBarLength = 10;
int generation = 0;
vector<int> resultsBuffer;
createPopulation(nrOfIndividuals, N, toFind, population);
//createPopulationSample(nrOfIndividuals, N, toFind, population);
//coutPopulation(population);
int COMPUTATION_START_TIME = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
while (!anyLimitReached(resultBufferSize, resultMinDiff, resultsBuffer, generation, generationsLimit, timeLimit, COMPUTATION_START_TIME, max_fitness_value, resultLimit)) {
//coutGPUStatus();
F_TIME_START();
performPopulationSelection(population, nrOfIndividuals, N, inf_values_size, INFLUENCE_THRESHOLD, groupSize, STEPS_MAX, inf_values, inf_col_ind, inf_row_ptr, toFind, max_fitness_value, max_fitness_individual, THREADS_PER_BLOCK);
F_TIME_END("selection");
F_TIME_START();
performCrossover(population, nrOfIndividuals, crossover_ratio, toFind);
F_TIME_END("crossover");
F_TIME_START();
performMutation(population, nrOfIndividuals, mutation_ratio, mutation_potency, toFind, N);
F_TIME_END("mutation");
//coutResult(generation, max_fitness_value);
generation++;
}
int COMPUTATION_END_TIME = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
int COMPUTATION_DURATION = COMPUTATION_END_TIME - COMPUTATION_START_TIME;
cout << endl << "[FINISHED] test: " << test+1 << "/" << tests
<< " for parameters set nr: " << parameters_set << "/" << parameters_sets
<< " for dataset_id: " << dataset_id+1 << "/" << datasets.size()
<< " in: " << COMPUTATION_DURATION / 1000.0 << "s";
cout << endl;
coutGPUStatus();
cout << endl;
if (saveResults) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(parameters_set) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(test+1) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(groupSize) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(nrOfIndividuals) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(crossover_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_potency) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(generation) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(max_fitness_value) + "</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
//cout << endl << "result " << test+1 << ": " << max_fitness_value << endl;
testsResultsSum += max_fitness_value;
testsGenerationsSum += generation;
testsTimeSum += COMPUTATION_DURATION;
/*cout << "Best individual found: " << endl;
for (int i=0; i<max_fitness_individual.size(); i++) {
cout << max_fitness_individual[i] << ", ";
}*/
//cout << endl << endl << "This group can activate " << max_fitness_value << " others";
//cout << endl << "Time elapsed: " << (time2 - COMPUTATION_START_TIME) / 1000.0 << "s" << endl;
} // TEST
float finalResult = std::round(testsResultsSum / tests);
float finalGenerations = std::round(testsGenerationsSum / tests);
float finalTime = std::round(testsTimeSum / tests);
cout << endl << "Final result avg: " << finalResult << " in avg " << finalTime / 1000.0 << "s" << endl;
results[file_id][parameters_set-1] = finalResult;
if (saveResults) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(parameters_set) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>AVG </Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(groupSize) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(nrOfIndividuals) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(crossover_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_potency) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(finalGenerations) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(finalResult) + "</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
parameters_set++;
});
});
});
});
});
if (saveResults) {
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
}
outfile.close();
}
cout << endl << endl << "*** RESULTS ***" << endl;
for (int i=0; i<datasets.size(); i++) {
for (int j=0; j<parameters_sets; j++) {
cout << results[i][j] << ", ";
}
cout << endl;
}
if (saveResultsCorrelation) {
// using ofstream constructors.
std::ofstream outfile("results_correlation_" + _EXPERIMENT_ID + "_.xls");
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasets.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(pearson(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasets.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(spearman(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
outfile.close();
}
return 0;
}
|
d17c3d9857998b865394082b35d6a6dbfd005ae2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
//Code written by Alan Fleming
//CONSTANTS
#define MATRIXSIZE 8
#define BLOCKSIZE 4
void mul_matrix_cpu(float *M, float *N, float *P, int width){
for( int i = 0; i<width; i++){
for( int j = 0; j<width; j++){
float sum = 0;
for (int k = 0; k < width; k++){
sum += M[i * width + k] * N[k * width + j];
}
P[i * width + j] = sum;
}
}
}
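//Tiled matrix multiplication: each block loads BLOCKSIZE x BLOCKSIZE tiles of M and N into
//shared memory and accumulates partial products across tiles. Assumes square width x width
//matrices with width a multiple of BLOCKSIZE (there are no boundary checks).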
__global__ void mul_matrix_gpu(float *M, float *N, float *P, int width) {
//Assuming matrix is width x width
//Assuming tile size = blockdim.x
__shared__ float ds_M[BLOCKSIZE * BLOCKSIZE];
__shared__ float ds_N[BLOCKSIZE * BLOCKSIZE];
//Calculate row and column
int row = blockIdx.y * blockDim.x + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//initialize Pvalue
float Pvalue = 0;
for (int i = 0; i < (width / blockDim.x); ++i) {
//copy global memory into shared memory
ds_M[threadIdx.y * blockDim.x + threadIdx.x] = M[row * width + i * blockDim.x + threadIdx.x];
ds_N[threadIdx.y * blockDim.x + threadIdx.x] = N[col + (i * blockDim.x + threadIdx.y) * width];
//ensure all data is copied
__syncthreads();
//Perform partial multiplications
for(int k = 0; k < blockDim.x; ++k) {
Pvalue += ds_M[threadIdx.y * blockDim.x + k] * ds_N[k * blockDim.x + threadIdx.x];
}
__syncthreads();
}
//Load final product into output memory
P[row * width + col] = Pvalue;
}
bool verify(float *A, float *B, float *C, int width) {
//Tolerance to check
const float tolerance = 1e-6;
for(int i = 0; i < width; ++i){
for(int k = 0; k < width; ++k) {
float sum = 0;
for(int j = 0; j < width; ++j) {
sum += A[i * width + j] * B[j * width + k];
}
//compute the relative error for comparison
float error = fabs(sum - C[i * width + k])/sum;
//Check if error is too large
if(error > tolerance) {
printf("TEST FAILED\n\n");
return false;
}
}
}
printf("TEST PASSED\n\n");
return true;
}
int main(int argc, char *argv[]){
//allocate system memory for array
float *a = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //first matrix
float *b = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //second matrix
float *c = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //resulting matrix
int init =1325;
for (int i=0;i<MATRIXSIZE;i++){
for (int j=0;j<MATRIXSIZE;j++){
init= 3125 * init % 6553;
a[i * MATRIXSIZE + j]= ( init -1000 ) % 6553;
b[i * MATRIXSIZE + j]= init % 251;
}
}
//get cpu start time
clock_t t1 = clock();
//run function
mul_matrix_cpu(a, b, c, MATRIXSIZE);
//get cpu stop time
clock_t t2 = clock();
//calculate runtime
float cpuTime = (float(t2 - t1)/CLOCKS_PER_SEC*1000);
//allocate memory on gpu
float *dev_a, *dev_b, *dev_c;
hipMalloc((void **)(&dev_a),MATRIXSIZE * MATRIXSIZE * sizeof(float));
hipMalloc((void **)(&dev_b),MATRIXSIZE * MATRIXSIZE * sizeof(float));
hipMalloc((void **)(&dev_c),MATRIXSIZE * MATRIXSIZE * sizeof(float));
//copy matrices to gpu
hipMemcpy(dev_a,a, MATRIXSIZE * MATRIXSIZE * sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_b,b, MATRIXSIZE * MATRIXSIZE * sizeof(float),hipMemcpyHostToDevice);
//calculate dimensions for gpu
dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
dim3 dimGrid( ceil(double(MATRIXSIZE)/dimBlock.x), ceil(double(MATRIXSIZE) /dimBlock.y));
//Set up cuda events for recording runtime
hipEvent_t start,stop;
float gpuTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// do some work on the GPU
hipLaunchKernelGGL(( mul_matrix_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_a, dev_b, dev_c, MATRIXSIZE);
//calculate runtime
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime,start,stop);
//destroy cuda events
hipEventDestroy(start);
hipEventDestroy(stop);
//copy memory from device
hipMemcpy(c,dev_c, MATRIXSIZE * MATRIXSIZE * sizeof(float),hipMemcpyDeviceToHost);
//print results
printf("CPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)gpuTime, double(cpuTime / gpuTime));
//verify results
verify(a,b,c, MATRIXSIZE);
//free memory
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
//exit program
return 0;
}
|
d17c3d9857998b865394082b35d6a6dbfd005ae2.cu
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
//Code written by Alan Fleming
//CONSTANTS
#define MATRIXSIZE 8
#define BLOCKSIZE 4
void mul_matrix_cpu(float *M, float *N, float *P, int width){
for( int i = 0; i<width; i++){
for( int j = 0; j<width; j++){
float sum = 0;
for (int k = 0; k < width; k++){
sum += M[i * width + k] * N[k * width + j];
}
P[i * width + j] = sum;
}
}
}
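//Tiled matrix multiplication: each block loads BLOCKSIZE x BLOCKSIZE tiles of M and N into
//shared memory and accumulates partial products across tiles. Assumes square width x width
//matrices with width a multiple of BLOCKSIZE (there are no boundary checks).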
__global__ void mul_matrix_gpu(float *M, float *N, float *P, int width) {
//Assuming matrix is width x width
//Assuming tile size = blockdim.x
__shared__ float ds_M[BLOCKSIZE * BLOCKSIZE];
__shared__ float ds_N[BLOCKSIZE * BLOCKSIZE];
//Calculate row and column
int row = blockIdx.y * blockDim.x + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//initialize Pvalue
float Pvalue = 0;
for (int i = 0; i < (width / blockDim.x); ++i) {
//copy global memory into shared memory
ds_M[threadIdx.y * blockDim.x + threadIdx.x] = M[row * width + i * blockDim.x + threadIdx.x];
ds_N[threadIdx.y * blockDim.x + threadIdx.x] = N[col + (i * blockDim.x + threadIdx.y) * width];
//ensure all data is copied
__syncthreads();
//Perform partial multiplications
for(int k = 0; k < blockDim.x; ++k) {
Pvalue += ds_M[threadIdx.y * blockDim.x + k] * ds_N[k * blockDim.x + threadIdx.x];
}
__syncthreads();
}
//Load final product into output memory
P[row * width + col] = Pvalue;
}
bool verify(float *A, float *B, float *C, int width) {
//Tolerance to check
const float tolerance = 1e-6;
for(int i = 0; i < width; ++i){
for(int k = 0; k < width; ++k) {
float sum = 0;
for(int j = 0; j < width; ++j) {
sum += A[i * width + j] * B[j * width + k];
}
//compute the relative error for comparison
float error = fabs(sum - C[i * width + k])/sum;
//Check if error is too large
if(error > tolerance) {
printf("TEST FAILED\n\n");
return false;
}
}
}
printf("TEST PASSED\n\n");
return true;
}
int main(int argc, char *argv[]){
//allocate system memory for array
float *a = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //first matrix
float *b = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //second matrix
float *c = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //resulting matrix
int init =1325;
for (int i=0;i<MATRIXSIZE;i++){
for (int j=0;j<MATRIXSIZE;j++){
init= 3125 * init % 6553;
a[i * MATRIXSIZE + j]= ( init -1000 ) % 6553;
b[i * MATRIXSIZE + j]= init % 251;
}
}
//get cpu start time
clock_t t1 = clock();
//run function
mul_matrix_cpu(a, b, c, MATRIXSIZE);
//get cpu stop time
clock_t t2 = clock();
//calculate runtime
float cpuTime = (float(t2 - t1)/CLOCKS_PER_SEC*1000);
//allocate memory on gpu
float *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)(&dev_a),MATRIXSIZE * MATRIXSIZE * sizeof(float));
cudaMalloc((void **)(&dev_b),MATRIXSIZE * MATRIXSIZE * sizeof(float));
cudaMalloc((void **)(&dev_c),MATRIXSIZE * MATRIXSIZE * sizeof(float));
//copy matrices to gpu
cudaMemcpy(dev_a,a, MATRIXSIZE * MATRIXSIZE * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b, MATRIXSIZE * MATRIXSIZE * sizeof(float),cudaMemcpyHostToDevice);
//calculate dimensions for gpu
dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
dim3 dimGrid( ceil(double(MATRIXSIZE)/dimBlock.x), ceil(double(MATRIXSIZE) /dimBlock.y));
//Set up cuda events for recording runtime
cudaEvent_t start,stop;
float gpuTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// do some work on the GPU
mul_matrix_gpu<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, MATRIXSIZE);
//calculate runtime
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime,start,stop);
//destroy cuda events
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copy memory from device
cudaMemcpy(c,dev_c, MATRIXSIZE * MATRIXSIZE * sizeof(float),cudaMemcpyDeviceToHost);
//print results
printf("CPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)gpuTime, double(cpuTime / gpuTime));
//verify results
verify(a,b,c, MATRIXSIZE);
//free memory
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
//exit program
return 0;
}
|
df6902fe65fc0f1f002679a86665a91d8815905e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <hip/driver_types.h>
void matrixAdd(float *,float *,float *,int,int);
__global__ void matrixAddKernel(float *,float *,float *,int);
int main(int argc, char * argv[]){
int i, j, nrow, ncol;
float *A, *B, *C;
//validate the argument count before reading argv
if(argc != 3){
printf("Usage: %s <rows> <columns>\n", argv[0]);
exit(EXIT_FAILURE);
}
nrow = atoi(argv[1]);
ncol = atoi(argv[2]);
if(nrow != ncol){
printf("Number of rows should be equal to number of columns\n");
exit(EXIT_FAILURE);
}
int size = nrow * ncol * sizeof(float);
A = (float *)malloc(size);
B = (float *)malloc(size);
C = (float *)malloc(size);
srand(time(NULL));
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
B[i*ncol+j] = ((float)rand())/RAND_MAX;
C[i*ncol+j] = ((float)rand())/RAND_MAX;
printf("B: %f - C: %f\n", B[i*ncol+j],C[i*ncol+j]);
}
}
matrixAdd(A,B,C,nrow,ncol);
FILE *output = fopen("matrix_output.txt", "w");
if(output == NULL){
printf("A file wasn't created or located\n");
exit(EXIT_FAILURE);
}
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
fprintf(output,"%f ",A[i*ncol+j]);
}
fprintf(output,"\n");
}
free(A);
free(B);
free(C);
return 0;
}
void matrixAdd(float * h_A,float * h_B, float * h_C, int nrow,int ncol){
int size = nrow * ncol * sizeof(float);
float *d_A, *d_B, *d_C;
hipError_t error = hipMalloc((void **)&d_B, size);
if(error != hipSuccess){
printf("%s in %s at line %d \n", hipGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice);
error = hipMalloc((void **)&d_C, size);
if(error != hipSuccess){
printf("%s in %s at line %d \n", hipGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
hipMemcpy(d_C,h_C,size,hipMemcpyHostToDevice);
error = hipMalloc((void **)&d_A, size);
if(error != hipSuccess){
printf("%s in %s at line %d \n", hipGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
//run kernel function with 32 threads for each block
hipLaunchKernelGGL(( matrixAddKernel), dim3(ceil(ncol/32.0)), dim3(32), 0, 0, d_A,d_B,d_C,ncol);
hipMemcpy(h_A,d_A,size,hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
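// Element-wise matrix add: each thread owns one column j and loops over the rows, writing A = B + C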
__global__
void matrixAddKernel(float * A,float * B, float * C, int n){
int j = threadIdx.x + blockDim.x * blockIdx.x;
if(j < n){
int i;
for(i=0;i<n;i++){
A[j+n*i] = B[j+n*i] + C[j+n*i];
}
}
}
|
df6902fe65fc0f1f002679a86665a91d8815905e.cu
|
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <driver_types.h>
void matrixAdd(float *,float *,float *,int,int);
__global__ void matrixAddKernel(float *,float *,float *,int);
int main(int argc, char * argv[]){
int i,j,nrow,ncol;//,rows,columns,i;
float *A, *B, *C;
if(argc != 3){
printf("Usage: %s <nrow> <ncol>\n", argv[0]);
exit(EXIT_FAILURE);
}
nrow = atoi(argv[1]);
ncol = atoi(argv[2]);
if(nrow != ncol){
printf("Number of rows should be equal to number of columns\n");
exit(EXIT_FAILURE);
}
int size = nrow * ncol * sizeof(float);
A = (float *)malloc(size);
B = (float *)malloc(size);
C = (float *)malloc(size);
srand(time(NULL));
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
B[i*ncol+j] = ((float)rand())/RAND_MAX;
C[i*ncol+j] = ((float)rand())/RAND_MAX;
printf("B: %f - C: %f\n", B[i*ncol+j],C[i*ncol+j]);
}
}
matrixAdd(A,B,C,nrow,ncol);
FILE *output = fopen("matrix_output.txt", "w");
if(output == NULL){
printf("A file wasn't created or located\n");
exit(EXIT_FAILURE);
}
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
fprintf(output,"%f ",A[i*ncol+j]);
}
fprintf(output,"\n");
}
free(A);
free(B);
free(C);
return 0;
}
void matrixAdd(float * h_A,float * h_B, float * h_C, int nrow,int ncol){
int size = nrow * ncol * sizeof(float);
float *d_A, *d_B, *d_C;
cudaError_t error = cudaMalloc((void **)&d_B, size);
if(error != cudaSuccess){
printf("%s in %s at line %d \n", cudaGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
error = cudaMalloc((void **)&d_C, size);
if(error != cudaSuccess){
printf("%s in %s at line %d \n", cudaGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);
error = cudaMalloc((void **)&d_A, size);
if(error != cudaSuccess){
printf("%s in %s at line %d \n", cudaGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
//run kernel function with 32 threads for each block
matrixAddKernel<<<ceil(ncol/32.0), 32>>>(d_A,d_B,d_C,ncol);
cudaMemcpy(h_A,d_A,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
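// Element-wise matrix add: each thread owns one column j and loops over the rows, writing A = B + C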
__global__
void matrixAddKernel(float * A,float * B, float * C, int n){
int j = threadIdx.x + blockDim.x * blockIdx.x;
if(j < n){
int i;
for(i=0;i<n;i++){
A[j+n*i] = B[j+n*i] + C[j+n*i];
}
}
}
|
e68e601cd7fe5935058db221fa277d132c4302be.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "imgproc.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <texture_fetch_functions.h>
#include <hip/hip_runtime_api.h>
#include <hip/device_functions.h>
const int block_x = 16;
const int block_y = 16;
const int blocksize = 64;
__constant__ int width;
__constant__ int height;
texture<uchar, 1, hipReadModeElementType> texRef;
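// In-place insertion sort for a tiny array; used below to sort each row of the 3x3 window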
__device__ void isort(uchar* lhs, int N)
{
int i, j;
uchar temp;
for (i = 1; i < N; ++i)
{
j = i - 1;
temp = lhs[i];
while (j > -1 && lhs[j] > temp)
{
lhs[j + 1] = lhs[j];
--j;
}
lhs[j + 1] = temp;
}
}
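// 3x3 median filter: each thread sorts its vertical triple (top/mid/down) into shared memory;
// interior threads then sort the rows of the resulting 3x3 window and pick the median as the
// median of {min of the maxima, median of the medians, max of the minima}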
__global__ void cuda_median_fil(uchar* src, uchar* dst)
{
int x = threadIdx.x;
int y = threadIdx.y;
int index_x = x + blockIdx.x * (blockDim.x - 2);
int index_y = y + blockIdx.y * blockDim.y + 1;
if (index_x < width && index_y < height - 1)
{
__shared__ uchar temp[block_y * 3][block_x];
int top, mid, down, change;
top = tex1Dfetch(texRef, index_x + (index_y - 1) * width);
mid = tex1Dfetch(texRef, index_x + index_y * width);
down = tex1Dfetch(texRef, index_x + (index_y + 1) * width);
if (top < mid)
{
change = mid;
mid = top;
top = change;
}
if (top < down)
{
change = down;
down = top;
top = change;
}
if (mid < down)
{
change = down;
down = mid;
mid = change;
}
int index = 3 * y;
temp[index][x] = top;
temp[index + 1][x] = mid;
temp[index + 2][x] = down;
__syncthreads();
if (x > 0 && x < block_x - 1)
{
uchar box[3][3];
for (int i = 0; i < 3; ++i)
{
for (int j = -1; j < 2; ++j)
{
box[i][j + 1] = temp[index + i][x + j]; //shift j by one so writes stay inside box[i][0..2]
}
}
for (int i = 0; i < 3; ++i)
{
isort(&box[i][0], 3);
}
if (box[0][0] < box[1][1])
{
change = box[0][0];
box[0][0] = box[1][1];
box[1][1] = change;
}
if (box[0][0] < box[2][2])
{
change = box[0][0];
box[0][0] = box[2][2];
box[2][2] = change;
}
if (box[1][1] < box[2][2])
dst[index_x + index_y * width] = box[2][2];
else
dst[index_x + index_y * width] = box[1][1];
}
}
}
void medianGPU(Mat src, Mat& dst)
{
if (src.type() != CV_8UC1)
{
src.convertTo(src, CV_8UC1);
}
copyMakeBorder(src, src, 1, 1, 1, 1, BORDER_CONSTANT, 0);
int size = src.rows * src.cols;
int count = size * sizeof(uchar);
uchar* dsrc, *ddst, *hdst;
hdst = (uchar*)malloc(count);
if (!hdst)
{
cerr << "host memory allocation failed!" << endl;
return;
}
memset(hdst, 0, count);
//allocate device memory
hipMalloc((void**)&dsrc, count);
hipMalloc((void**)&ddst, count);
//copy host to device
hipMemcpy(dsrc, src.data, count, hipMemcpyHostToDevice);
//bind to texture reference
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar>();
hipBindTexture(0, &texRef, dsrc, &channelDesc, count);
//width and height
hipMemcpyToSymbol((const void*)&width, (const void*)&src.cols, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol((const void*)&height, (const void*)&src.rows, sizeof(int), 0, hipMemcpyHostToDevice);
//kernel function to do median filter
dim3 block(block_x, block_y);
dim3 grid((src.cols + block_x - 2) / (block_x - 2), (src.rows + block_y - 2) / block_y);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
cuda_median_fil << <grid, block >> >(dsrc, ddst);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsed_time;
hipEventElapsedTime(&elapsed_time, start, stop);
cout << "elapsed time is " << elapsed_time << " ms" << endl;
//unbind memory from the texture reference
hipUnbindTexture(&texRef);
//memcpy from device to host
hipMemcpy(hdst, ddst, count, hipMemcpyDeviceToHost);
//copy to dst
dst = Mat_<uchar>(src.rows, src.cols, hdst).clone();
//release memory
hipFree(dsrc);
hipFree(ddst);
free(hdst);
}
|
e68e601cd7fe5935058db221fa277d132c4302be.cu
|
#include "imgproc.cuh"
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <texture_fetch_functions.h>
#include <cuda_runtime_api.h>
#include <device_functions.h>
const int block_x = 16;
const int block_y = 16;
const int blocksize = 64;
__constant__ int width;
__constant__ int height;
texture<uchar, 1, cudaReadModeElementType> texRef;
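// In-place insertion sort for a tiny array; used below to sort each row of the 3x3 window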
__device__ void isort(uchar* lhs, int N)
{
int i, j;
uchar temp;
for (i = 1; i < N; ++i)
{
j = i - 1;
temp = lhs[i];
while (j > -1 && lhs[j] > temp)
{
lhs[j + 1] = lhs[j];
--j;
}
lhs[j + 1] = temp;
}
}
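// 3x3 median filter: each thread sorts its vertical triple (top/mid/down) into shared memory;
// interior threads then sort the rows of the resulting 3x3 window and pick the median as the
// median of {min of the maxima, median of the medians, max of the minima}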
__global__ void cuda_median_fil(uchar* src, uchar* dst)
{
int x = threadIdx.x;
int y = threadIdx.y;
int index_x = x + blockIdx.x * (blockDim.x - 2);
int index_y = y + blockIdx.y * blockDim.y + 1;
if (index_x < width && index_y < height - 1)
{
__shared__ uchar temp[block_y * 3][block_x];
int top, mid, down, change;
top = tex1Dfetch(texRef, index_x + (index_y - 1) * width);
mid = tex1Dfetch(texRef, index_x + index_y * width);
down = tex1Dfetch(texRef, index_x + (index_y + 1) * width);
if (top < mid)
{
change = mid;
mid = top;
top = change;
}
if (top < down)
{
change = down;
down = top;
top = change;
}
if (mid < down)
{
change = down;
down = mid;
mid = change;
}
int index = 3 * y;
temp[index][x] = top;
temp[index + 1][x] = mid;
temp[index + 2][x] = down;
__syncthreads();
if (x > 0 && x < block_x - 1)
{
uchar box[3][3];
for (int i = 0; i < 3; ++i)
{
for (int j = -1; j < 2; ++j)
{
box[i][j + 1] = temp[index + i][x + j]; //shift j by one so writes stay inside box[i][0..2]
}
}
for (int i = 0; i < 3; ++i)
{
isort(&box[i][0], 3);
}
if (box[0][0] < box[1][1])
{
change = box[0][0];
box[0][0] = box[1][1];
box[1][1] = change;
}
if (box[0][0] < box[2][2])
{
change = box[0][0];
box[0][0] = box[2][2];
box[2][2] = change;
}
if (box[1][1] < box[2][2])
dst[index_x + index_y * width] = box[2][2];
else
dst[index_x + index_y * width] = box[1][1];
}
}
}
void medianGPU(Mat src, Mat& dst)
{
if (src.type() != CV_8UC1)
{
src.convertTo(src, CV_8UC1);
}
copyMakeBorder(src, src, 1, 1, 1, 1, BORDER_CONSTANT, 0);
int size = src.rows * src.cols;
int count = size * sizeof(uchar);
uchar* dsrc, *ddst, *hdst;
hdst = (uchar*)malloc(count);
if (!hdst)
{
cerr << "host memory allocation failed!" << endl;
return;
}
memset(hdst, 0, count);
//allocate device memory
cudaMalloc((void**)&dsrc, count);
cudaMalloc((void**)&ddst, count);
//copy host to device
cudaMemcpy(dsrc, src.data, count, cudaMemcpyHostToDevice);
//bind to texture reference
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar>();
cudaBindTexture(0, &texRef, dsrc, &channelDesc, count);
//width and height
cudaMemcpyToSymbol((const void*)&width, (const void*)&src.cols, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol((const void*)&height, (const void*)&src.rows, sizeof(int), 0, cudaMemcpyHostToDevice);
//kernel function to do median filter
dim3 block(block_x, block_y);
dim3 grid((src.cols + block_x - 2) / (block_x - 2), (src.rows + block_y - 2) / block_y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cuda_median_fil << <grid, block >> >(dsrc, ddst);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed_time;
cudaEventElapsedTime(&elapsed_time, start, stop);
cout << "elapsed time is " << elapsed_time << " ms" << endl;
//unbind memory from the texture reference
cudaUnbindTexture(&texRef);
//memcpy from device to host
cudaMemcpy(hdst, ddst, count, cudaMemcpyDeviceToHost);
//copy to dst
dst = Mat_<uchar>(src.rows, src.cols, hdst).clone();
//release memory
cudaFree(dsrc);
cudaFree(ddst);
free(hdst);
}
|
fef5a1767a4b8f83e2a66b0a67e8213bd79dae18.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void DeviceMultiply(double* left, double* right, double* result, int left_rows, int left_cols, int right_cols) {
int i = threadIdx.y;
int j = threadIdx.x;
int x_stride = blockDim.x;
int y_stride = blockDim.y;
__shared__ double temp[16][16];
for (int y_offset = 0; i + y_offset < left_rows; y_offset += y_stride) {
for (int x_offset = 0; j + x_offset < right_cols; x_offset += x_stride) {
temp[i][j] = 0.0;
for (int k = 0; k < left_cols; ++k) {
int left_idx = (y_offset + i) * left_cols + k;
int right_idx = k * right_cols + x_offset + j;
temp[i][j] += left[left_idx] * right[right_idx];
}
int result_idx = (y_offset + i) * right_cols + x_offset + j;
result[result_idx] = temp[i][j];
}
}
}
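// Illustrative host-side launcher for DeviceMultiply (not part of the original file; the
// function name, 16x16 block shape, and launch configuration below are assumptions, shown
// only as one plausible way to drive the kernel from HIP host code).
void LaunchDeviceMultiply(const double* h_left, const double* h_right, double* h_result,
                          int left_rows, int left_cols, int right_cols) {
    double *d_left, *d_right, *d_result;
    size_t left_bytes = (size_t)left_rows * left_cols * sizeof(double);
    size_t right_bytes = (size_t)left_cols * right_cols * sizeof(double);
    size_t result_bytes = (size_t)left_rows * right_cols * sizeof(double);
    hipMalloc((void**)&d_left, left_bytes);
    hipMalloc((void**)&d_right, right_bytes);
    hipMalloc((void**)&d_result, result_bytes);
    hipMemcpy(d_left, h_left, left_bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_right, h_right, right_bytes, hipMemcpyHostToDevice);
    // The kernel strides a single 16x16 thread block over the whole output, so one block suffices.
    hipLaunchKernelGGL(DeviceMultiply, dim3(1), dim3(16, 16), 0, 0,
                       d_left, d_right, d_result, left_rows, left_cols, right_cols);
    hipMemcpy(h_result, d_result, result_bytes, hipMemcpyDeviceToHost);
    hipFree(d_left);
    hipFree(d_right);
    hipFree(d_result);
}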
|
fef5a1767a4b8f83e2a66b0a67e8213bd79dae18.cu
|
#include "includes.h"
__global__ void DeviceMultiply(double* left, double* right, double* result, int left_rows, int left_cols, int right_cols) {
int i = threadIdx.y;
int j = threadIdx.x;
int x_stride = blockDim.x;
int y_stride = blockDim.y;
__shared__ double temp[16][16];
for (int y_offset = 0; i + y_offset < left_rows; y_offset += y_stride) {
for (int x_offset = 0; j + x_offset < right_cols; x_offset += x_stride) {
temp[i][j] = 0.0;
for (int k = 0; k < left_cols; ++k) {
int left_idx = (y_offset + i) * left_cols + k;
int right_idx = k * right_cols + x_offset + j;
temp[i][j] += left[left_idx] * right[right_idx];
}
int result_idx = (y_offset + i) * right_cols + x_offset + j;
result[result_idx] = temp[i][j];
}
}
}
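// Illustrative host-side launcher for DeviceMultiply (not part of the original file; the
// function name, 16x16 block shape, and launch configuration below are assumptions, shown
// only as one plausible way to drive the kernel from CUDA host code).
void LaunchDeviceMultiply(const double* h_left, const double* h_right, double* h_result,
                          int left_rows, int left_cols, int right_cols) {
    double *d_left, *d_right, *d_result;
    size_t left_bytes = (size_t)left_rows * left_cols * sizeof(double);
    size_t right_bytes = (size_t)left_cols * right_cols * sizeof(double);
    size_t result_bytes = (size_t)left_rows * right_cols * sizeof(double);
    cudaMalloc((void**)&d_left, left_bytes);
    cudaMalloc((void**)&d_right, right_bytes);
    cudaMalloc((void**)&d_result, result_bytes);
    cudaMemcpy(d_left, h_left, left_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_right, h_right, right_bytes, cudaMemcpyHostToDevice);
    // The kernel strides a single 16x16 thread block over the whole output, so one block suffices.
    DeviceMultiply<<<1, dim3(16, 16)>>>(d_left, d_right, d_result, left_rows, left_cols, right_cols);
    cudaMemcpy(h_result, d_result, result_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_left);
    cudaFree(d_right);
    cudaFree(d_result);
}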
|
b37bbe6ac418f4878362984459f7e46faa576cfa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void kernel()
{
}
void cudamain()
{
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
return;
}
|
b37bbe6ac418f4878362984459f7e46faa576cfa.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void kernel()
{
}
void cudamain()
{
kernel<<<1,1>>>();
return;
}
|
c4f4ceaee8649cd91ce2fdd60e471109394a70ac.hip
|
// !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#if TORCH_HIP_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
#define M_PI 3.14159265359f
enum { DEBUG_COLLISIONS, DEBUG_SHADOWS, DEBUG_REFLECTIONS, DEBUG_DIFFUSE, DEBUG_NORMALS, DEBUG_ALL };
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
// Create ray using pinhole camera projection
float px_size_x = tan( fov.x * (PI/180.0) );
float px_size_y = tan( fov.y * (PI/180.0) );
ray r;
r.origin = eye;
r.direction = view + (-2*px_size_x*x/resolution.x + px_size_x)*glm::cross( view, up ) \
+ (-2*px_size_y*y/resolution.y + px_size_y)*up;
return r;
}
__host__ __device__ glm::vec3 computeGeomColor( int min_intersection_ind, staticGeom* geoms, material* materials, ray r, float intersection_dist, glm::vec3 intersection_normal, glm::vec3 intersection_point ) {
// Set color equal to material color
int mat_id = geoms[min_intersection_ind].materialid;
//colors[index] = materials[mat_id].color;
// Calculate Phong Lighting
// Start with an arbitrarily chosen light source, let's say from the camera
glm::vec3 debug_light_source(0.0, 0.0, 0.0);
glm::vec3 light_vector = glm::normalize(debug_light_source - intersection_point);
glm::vec3 viewing_vector = glm::normalize( r.origin - intersection_point );
glm::vec3 reflection_vector = 2*glm::dot( light_vector, intersection_normal )*intersection_normal - light_vector;
// Calculate Phong reflection model ... this is quite inefficient at the moment
//float m_d = 1.0; //?
//float s_d = 1.0; //?
//float c_d = s_d*m_d*max( glm::dot( intersection_normal, light_vector ), 0.0 );
//float c_d = powf( max( glm::dot( light_vector, viewing_vector ), 0.0 ), materials[mat_id].specularExponent );
float ks = 1.0; // specular reflection constant
float kd = 0.5; // diffuse reflection constant
float ka = 0.5; // ambient reflection constant
// Ambient Component
glm::vec3 ambient(1.0, 1.0, 1.0);
// Diffuse Component
//glm::vec3 diffuseIntensity( 1.0, 1.0, 1.0 ); Not needed at the moment
float diffuse = max(glm::dot( light_vector, intersection_normal ), 0.0);
// Specular Component
float specularExponent = materials[mat_id].specularExponent; // alpha, shininess
glm::vec3 specColor = materials[mat_id].specularColor;
glm::vec3 specular( 0.0, 0.0, 0.0 );
if ( specularExponent > 0.0 ) {
specular = specColor*powf( max( glm::dot( reflection_vector, viewing_vector ), 0.0 ), specularExponent );
}
// Full illumination
glm::vec3 Illumination = ka*ambient + kd*diffuse + ks*specular;
return Illumination*materials[mat_id].color;
}
// Compute Light Contribution to object
__host__ __device__ glm::vec3 computeLightContribution( float shadowContribution, material mat, ray current_ray, ray light_ray, glm::vec3 intersection_normal, glm::vec3 intersection_point ) {
glm::vec3 light_vector = light_ray.direction;
glm::vec3 viewing_vector = glm::normalize( current_ray.origin - intersection_point );
glm::vec3 reflection_vector = 2*glm::dot( light_vector, intersection_normal )*intersection_normal - light_vector;
// Temporarily
float ka = 0.5; // ambient
float ks = 1.0; // specular reflection constant
float kd = 0.5; // diffuse reflection constant
glm::vec3 ambient( 1.0, 1.0, 1.0 );
float diffuse = max(glm::dot( light_vector, intersection_normal ), 0.0);
// Specular Component
float specularExponent = mat.specularExponent; // alpha, shininess
glm::vec3 specColor = mat.specularColor;
glm::vec3 specular( 0.0, 0.0, 0.0 );
if ( specularExponent > 0.0 ) {
specular = specColor*powf( max( glm::dot( reflection_vector, viewing_vector ), 0.0 ), specularExponent );
}
// Full illumination
glm::vec3 illumination = ka*ambient + shadowContribution*kd*diffuse + shadowContribution*ks*specular;
return illumination*mat.color;
}
// Find closest intersection
__host__ __device__ int closestIntersection( ray r, staticGeom* geoms, int numberOfGeoms, float& intersection_dist, glm::vec3& intersection_normal, glm::vec3& intersection_point ) {
// Check for intersections. This has way too many branches :/
int min_intersection_ind = -1;
float intersection_dist_new;
glm::vec3 intersection_point_new;
glm::vec3 intersection_normal_new;
for (int i=0; i < numberOfGeoms; ++i ) {
// Check for intersection with Sphere
if ( geoms[i].type == SPHERE ) {
intersection_dist_new = sphereIntersectionTest(geoms[i], r, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == CUBE ) {
intersection_dist_new = boxIntersectionTest(geoms[i], r, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == MESH ) {
// TODO
}
if (intersection_dist_new != -1 ) {
// If new distance is closer than previously seen one then use the new one
if ( intersection_dist_new < intersection_dist || intersection_dist == -1 ) {
intersection_dist = intersection_dist_new;
intersection_point = intersection_point_new;
intersection_normal = intersection_normal_new;
min_intersection_ind = i;
}
}
}
return min_intersection_ind;
}
// Check if ray to light is occluded by an object
// This is going to be super inefficient: for each geom, check if it's a light; if so, trace a ray to
// it and see if that ray intersects any other geoms.
/*
__host__ __device__ int isShadowRay( glm::vec3 light, ray &light_ray, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, glm::vec3 intersection_point ) {
// DOESN'T WORK YET!?!?
// Ray to light
light_ray.origin = intersection_point;
int obj_ind = -1;
glm::vec3 light_vector;
// Unfortunately I don't really care about these, I should probably do a refactor
glm::vec3 obstacle_intersection_normal;
glm::vec3 obstacle_intersection_point;
float obstacle_intersection_dist;
// Closest light index
int light_index = -1;
light_ray.direction = glm::normalize(light - intersection_point);
ray intersection_ray;
intersection_ray.origin = intersection_point;
intersection_ray.direction = light_ray.direction;
obj_ind = closestIntersection( intersection_ray, geoms, numberOfGeoms, obstacle_intersection_dist, obstacle_intersection_normal, obstacle_intersection_point );
return obj_ind;
}
*/
__host__ __device__ float isShadowRay( ray light_ray, int intersection_geom_ind, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials ) {
float shadow_contribution = 1.0;
int min_intersection_ind = -1;
float intersection_dist = -1;
glm::vec3 intersection_point;
glm::vec3 intersection_normal;
float intersection_dist_new;
glm::vec3 intersection_point_new;
glm::vec3 intersection_normal_new;
for (int i=0; i < numberOfGeoms; ++i ) {
if ( i == intersection_geom_ind ) {
continue;
}
// Check for intersection with Sphere
if ( geoms[i].type == SPHERE ) {
intersection_dist_new = sphereIntersectionTest(geoms[i], light_ray, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == CUBE ) {
intersection_dist_new = boxIntersectionTest(geoms[i], light_ray, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == MESH ) {
// TODO
}
if (intersection_dist_new != -1 ) {
// If new distance is closer than previously seen one then use the new one
if ( intersection_dist_new < intersection_dist || intersection_dist == -1 ) {
intersection_dist = intersection_dist_new;
intersection_point = intersection_point_new;
intersection_normal = intersection_normal_new;
min_intersection_ind = i;
shadow_contribution = 0.0;
}
}
}
return shadow_contribution;
}
// Calculate light ray
__host__ __device__ ray computeLightRay( glm::vec3 light, glm::vec3 intersection_point ) {
ray light_ray;
light_ray.origin = intersection_point;
light_ray.direction = glm::normalize(light - intersection_point);
return light_ray;
}
// Calculate reflected ray
__host__ __device__ ray computeReflectedRay( ray currentRay, glm::vec3 intersection_normal, glm::vec3 intersection_point ) {
ray reflected_ray;
reflected_ray.origin = intersection_point;
reflected_ray.direction = -2*glm::dot(currentRay.direction, intersection_normal)*intersection_normal + currentRay.direction;
return reflected_ray;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, int iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
float scl = 1.0f/((float)iterations);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = scl*image[index].x*255.0;
color.y = scl*image[index].y*255.0;
color.z = scl*image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials,
int debugMode ){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
int obj_index = -1;
float intersection_dist = -1; // sentinel meaning no hit yet (NaN/Infinity would be cleaner)
glm::vec3 intersection_point;
glm::vec3 intersection_normal;
glm::vec3 intersection_point_new;
glm::vec3 intersection_normal_new;
// Ambient Component
//glm::vec3 ambient(0.2, 0.2, 0.5);
glm::vec3 ambient;
//glm::vec3 light( 10.0, 0.0, 0.0 );
//glm::vec3 light(-10.0, 0.0, -2.0);
glm::vec3 light( 0.0, 2.0, 8.0 );
glm::vec3 light_sample;
ray light_ray;
//glm::vec3 color(0.0, 0.0, 0.0);
glm::vec3 color = ambient;
glm::vec3 colorContribution(1.0,1.0,1.0);
float shadowContribution = 1.0;
if((x<=resolution.x && y<=resolution.y)){
// Calculate initial ray as projected from camera
ray currentRay = raycastFromCameraKernel( cam.resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov );
ray lightRay;
// Iteratively trace rays until depth is reached
int depth;
if ( debugMode == DEBUG_ALL || debugMode == DEBUG_REFLECTIONS ) {
depth = 4;
} else {
depth = 1;
}
for (int i=0; i<depth; ++i) {
obj_index = closestIntersection( currentRay, geoms, numberOfGeoms, intersection_dist, intersection_normal, intersection_point );
if (obj_index == -1) {
break;
}
light_sample = getRandomPointOnLight( light, 0.25, index*time );
lightRay = computeLightRay( light_sample, intersection_point );
shadowContribution = isShadowRay( lightRay, obj_index, geoms, numberOfGeoms, materials, numberOfMaterials );
if ( debugMode == DEBUG_ALL ) {
// All? Debug Mode
//int mat_id = isShadowRay( light, lightRay, geoms, numberOfGeoms, materials, numberOfMaterials, intersection_point );
color += colorContribution*computeLightContribution( shadowContribution, materials[geoms[obj_index].materialid], currentRay, lightRay, intersection_normal, intersection_point );
colorContribution *= materials[geoms[obj_index].materialid].absorptionCoefficient;
// Calculate reflected rays
if ( materials[geoms[obj_index].materialid].hasReflective ) {
currentRay = computeReflectedRay( currentRay, intersection_normal, intersection_point );
}
} else if ( debugMode == DEBUG_SHADOWS ) {
// Shadows debug mode
float diffuse = max(glm::dot( lightRay.direction, intersection_normal ), 0.0);
ambient = glm::vec3( 0.25, 0.25, 0.25 );
color = shadowContribution*glm::vec3(1.0,1.0,1.0)*diffuse + ambient;
} else if ( debugMode == DEBUG_DIFFUSE ) {
float diffuse = glm::dot( lightRay.direction, intersection_normal );
color = 0.5f*glm::vec3(1.0,1.0,1.0)*diffuse + 0.5f;
} else if ( debugMode == DEBUG_COLLISIONS ) {
// Collisions debug mode
color = materials[geoms[obj_index].materialid].color;
} else if ( debugMode == DEBUG_NORMALS ) {
color = 0.5f*intersection_normal + 0.5f;
//color = 0.5f*glm::vec3(0.0, 0.0, -1.0) + 0.5f;
//color = 0.5f*lightRay.direction + 0.5f;
}
}
//colors[index] = generateRandomNumberFromThread(resolution, time, x, y);
// Accumulate in image buffer
//colors[index] = color;
colors[index] += color;
}
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){
int traceDepth = 1; //determines how many bounces the raytracer traces
// set up launch configuration: 8x8-pixel tiles, one thread per pixel
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
//send image to GPU
glm::vec3* cudaimage = NULL;
hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
//package geometry and materials and send to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
material* cudamaterials = NULL;
hipMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
hipMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);
//package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
//int debugMode = DEBUG_COLLISIONS;
int debugMode = DEBUG_ALL;
//int debugMode = DEBUG_DIFFUSE;
//int debugMode = DEBUG_NORMALS;
//kernel launches
hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials, debugMode);
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage, iterations);
//retrieve image from GPU
hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
//free up stuff, or else we'll leak memory like a madman
hipFree( cudaimage );
hipFree( cudageoms );
delete [] geomList;
// make certain the kernel has completed
hipDeviceSynchronize();
hipError_t errorNum = hipPeekAtLastError();
if ( errorNum != hipSuccess ) {
printf ("Cuda error -- %s\n", hipGetErrorString(errorNum));
}
checkCUDAError("Kernel failed!");
}
|
c4f4ceaee8649cd91ce2fdd60e471109394a70ac.cu
|
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#if CUDA_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
#define M_PI 3.14159265359f
enum { DEBUG_COLLISIONS, DEBUG_SHADOWS, DEBUG_REFLECTIONS, DEBUG_DIFFUSE, DEBUG_NORMALS, DEBUG_ALL };
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
// Create ray using pinhole camera projection
float px_size_x = tan( fov.x * (PI/180.0) );
float px_size_y = tan( fov.y * (PI/180.0) );
ray r;
r.origin = eye;
r.direction = view + (-2*px_size_x*x/resolution.x + px_size_x)*glm::cross( view, up ) \
+ (-2*px_size_y*y/resolution.y + px_size_y)*up;
return r;
}
__host__ __device__ glm::vec3 computeGeomColor( int min_intersection_ind, staticGeom* geoms, material* materials, ray r, float intersection_dist, glm::vec3 intersection_normal, glm::vec3 intersection_point ) {
// Set color equal to material color
int mat_id = geoms[min_intersection_ind].materialid;
//colors[index] = materials[mat_id].color;
// Calculate Phong Lighting
// Start with an arbitrarily chosen light source, let's say from the camera
glm::vec3 debug_light_source(0.0, 0.0, 0.0);
glm::vec3 light_vector = glm::normalize(debug_light_source - intersection_point);
glm::vec3 viewing_vector = glm::normalize( r.origin - intersection_point );
glm::vec3 reflection_vector = 2*glm::dot( light_vector, intersection_normal )*intersection_normal - light_vector;
// Calculate Phong reflection model ... this is quite inefficient at the moment
//float m_d = 1.0; //?
//float s_d = 1.0; //?
//float c_d = s_d*m_d*max( glm::dot( intersection_normal, light_vector ), 0.0 );
//float c_d = powf( max( glm::dot( light_vector, viewing_vector ), 0.0 ), materials[mat_id].specularExponent );
float ks = 1.0; // specular reflection constant
float kd = 0.5; // diffuse reflection constant
float ka = 0.5; // ambient reflection constant
// Ambient Component
glm::vec3 ambient(1.0, 1.0, 1.0);
// Diffuse Component
//glm::vec3 diffuseIntensity( 1.0, 1.0, 1.0 ); Not needed at the moment
float diffuse = max(glm::dot( light_vector, intersection_normal ), 0.0);
// Specular Component
float specularExponent = materials[mat_id].specularExponent; // alpha, shininess
glm::vec3 specColor = materials[mat_id].specularColor;
glm::vec3 specular( 0.0, 0.0, 0.0 );
if ( specularExponent > 0.0 ) {
specular = specColor*powf( max( glm::dot( reflection_vector, viewing_vector ), 0.0 ), specularExponent );
}
// Full illumination
glm::vec3 Illumination = ka*ambient + kd*diffuse + ks*specular;
return Illumination*materials[mat_id].color;
}
// Compute Light Contribution to object
__host__ __device__ glm::vec3 computeLightContribution( float shadowContribution, material mat, ray current_ray, ray light_ray, glm::vec3 intersection_normal, glm::vec3 intersection_point ) {
glm::vec3 light_vector = light_ray.direction;
glm::vec3 viewing_vector = glm::normalize( current_ray.origin - intersection_point );
glm::vec3 reflection_vector = 2*glm::dot( light_vector, intersection_normal )*intersection_normal - light_vector;
// Temporarily
float ka = 0.5; // ambient
float ks = 1.0; // specular reflection constant
float kd = 0.5; // diffuse reflection constant
glm::vec3 ambient( 1.0, 1.0, 1.0 );
float diffuse = max(glm::dot( light_vector, intersection_normal ), 0.0);
// Specular Component
float specularExponent = mat.specularExponent; // alpha, shininess
glm::vec3 specColor = mat.specularColor;
glm::vec3 specular( 0.0, 0.0, 0.0 );
if ( specularExponent > 0.0 ) {
specular = specColor*powf( max( glm::dot( reflection_vector, viewing_vector ), 0.0 ), specularExponent );
}
// Full illumination
glm::vec3 illumination = ka*ambient + shadowContribution*kd*diffuse + shadowContribution*ks*specular;
return illumination*mat.color;
}
// Find closest intersection
__host__ __device__ int closestIntersection( ray r, staticGeom* geoms, int numberOfGeoms, float& intersection_dist, glm::vec3& intersection_normal, glm::vec3& intersection_point ) {
// Check for intersections. This has way too many branches :/
int min_intersection_ind = -1;
float intersection_dist_new;
glm::vec3 intersection_point_new;
glm::vec3 intersection_normal_new;
for (int i=0; i < numberOfGeoms; ++i ) {
// Check for intersection with Sphere
if ( geoms[i].type == SPHERE ) {
intersection_dist_new = sphereIntersectionTest(geoms[i], r, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == CUBE ) {
intersection_dist_new = boxIntersectionTest(geoms[i], r, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == MESH ) {
// TODO
}
if (intersection_dist_new != -1 ) {
// If new distance is closer than previously seen one then use the new one
if ( intersection_dist_new < intersection_dist || intersection_dist == -1 ) {
intersection_dist = intersection_dist_new;
intersection_point = intersection_point_new;
intersection_normal = intersection_normal_new;
min_intersection_ind = i;
}
}
}
return min_intersection_ind;
}
// Check if ray to light is occluded by an object
// This is going to be super inefficient: for each geom, check if it's a light; if so, trace a ray to
// it and see if that ray intersects any other geoms.
/*
__host__ __device__ int isShadowRay( glm::vec3 light, ray &light_ray, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, glm::vec3 intersection_point ) {
// DOESN'T WORK YET!?!?
// Ray to light
light_ray.origin = intersection_point;
int obj_ind = -1;
glm::vec3 light_vector;
// Unfortunately I don't really care about these, I should probably do a refactor
glm::vec3 obstacle_intersection_normal;
glm::vec3 obstacle_intersection_point;
float obstacle_intersection_dist;
// Closest light index
int light_index = -1;
light_ray.direction = glm::normalize(light - intersection_point);
ray intersection_ray;
intersection_ray.origin = intersection_point;
intersection_ray.direction = light_ray.direction;
obj_ind = closestIntersection( intersection_ray, geoms, numberOfGeoms, obstacle_intersection_dist, obstacle_intersection_normal, obstacle_intersection_point );
return obj_ind;
}
*/
__host__ __device__ float isShadowRay( ray light_ray, int intersection_geom_ind, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials ) {
float shadow_contribution = 1.0;
int min_intersection_ind = -1;
float intersection_dist = -1;
glm::vec3 intersection_point;
glm::vec3 intersection_normal;
float intersection_dist_new;
glm::vec3 intersection_point_new;
glm::vec3 intersection_normal_new;
for (int i=0; i < numberOfGeoms; ++i ) {
if ( i == intersection_geom_ind ) {
continue;
}
// Check for intersection with Sphere
if ( geoms[i].type == SPHERE ) {
intersection_dist_new = sphereIntersectionTest(geoms[i], light_ray, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == CUBE ) {
intersection_dist_new = boxIntersectionTest(geoms[i], light_ray, intersection_point_new, intersection_normal_new);
} else if ( geoms[i].type == MESH ) {
// TODO
}
if (intersection_dist_new != -1 ) {
// If new distance is closer than previously seen one then use the new one
if ( intersection_dist_new < intersection_dist || intersection_dist == -1 ) {
intersection_dist = intersection_dist_new;
intersection_point = intersection_point_new;
intersection_normal = intersection_normal_new;
min_intersection_ind = i;
shadow_contribution = 0.0;
}
}
}
return shadow_contribution;
}
// Calculate light ray
__host__ __device__ ray computeLightRay( glm::vec3 light, glm::vec3 intersection_point ) {
ray light_ray;
light_ray.origin = intersection_point;
light_ray.direction = glm::normalize(light - intersection_point);
return light_ray;
}
// Calculate reflected ray
__host__ __device__ ray computeReflectedRay( ray currentRay, glm::vec3 intersection_normal, glm::vec3 intersection_point ) {
ray reflected_ray;
reflected_ray.origin = intersection_point;
reflected_ray.direction = -2*glm::dot(currentRay.direction, intersection_normal)*intersection_normal + currentRay.direction;
return reflected_ray;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, int iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
float scl = 1.0f/((float)iterations);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = scl*image[index].x*255.0;
color.y = scl*image[index].y*255.0;
color.z = scl*image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials,
int debugMode ){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
int obj_index = -1;
float intersection_dist = -1; // sentinel meaning no hit yet (NaN/Infinity would be cleaner)
glm::vec3 intersection_point;
glm::vec3 intersection_normal;
glm::vec3 intersection_point_new;
glm::vec3 intersection_normal_new;
// Ambient Component
//glm::vec3 ambient(0.2, 0.2, 0.5);
glm::vec3 ambient;
//glm::vec3 light( 10.0, 0.0, 0.0 );
//glm::vec3 light(-10.0, 0.0, -2.0);
glm::vec3 light( 0.0, 2.0, 8.0 );
glm::vec3 light_sample;
ray light_ray;
//glm::vec3 color(0.0, 0.0, 0.0);
glm::vec3 color = ambient;
glm::vec3 colorContribution(1.0,1.0,1.0);
float shadowContribution = 1.0;
if((x<=resolution.x && y<=resolution.y)){
// Calculate initial ray as projected from camera
ray currentRay = raycastFromCameraKernel( cam.resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov );
ray lightRay;
// Iteratively trace rays until depth is reached
int depth;
if ( debugMode == DEBUG_ALL || debugMode == DEBUG_REFLECTIONS ) {
depth = 4;
} else {
depth = 1;
}
for (int i=0; i<depth; ++i) {
obj_index = closestIntersection( currentRay, geoms, numberOfGeoms, intersection_dist, intersection_normal, intersection_point );
if (obj_index == -1) {
break;
}
light_sample = getRandomPointOnLight( light, 0.25, index*time );
lightRay = computeLightRay( light_sample, intersection_point );
shadowContribution = isShadowRay( lightRay, obj_index, geoms, numberOfGeoms, materials, numberOfMaterials );
if ( debugMode == DEBUG_ALL ) {
// All? Debug Mode
//int mat_id = isShadowRay( light, lightRay, geoms, numberOfGeoms, materials, numberOfMaterials, intersection_point );
color += colorContribution*computeLightContribution( shadowContribution, materials[geoms[obj_index].materialid], currentRay, lightRay, intersection_normal, intersection_point );
colorContribution *= materials[geoms[obj_index].materialid].absorptionCoefficient;
// Calculate reflected rays
if ( materials[geoms[obj_index].materialid].hasReflective ) {
currentRay = computeReflectedRay( currentRay, intersection_normal, intersection_point );
}
} else if ( debugMode == DEBUG_SHADOWS ) {
// Shadows debug mode
float diffuse = max(glm::dot( lightRay.direction, intersection_normal ), 0.0);
ambient = glm::vec3( 0.25, 0.25, 0.25 );
color = shadowContribution*glm::vec3(1.0,1.0,1.0)*diffuse + ambient;
} else if ( debugMode == DEBUG_DIFFUSE ) {
float diffuse = glm::dot( lightRay.direction, intersection_normal );
color = 0.5f*glm::vec3(1.0,1.0,1.0)*diffuse + 0.5f;
} else if ( debugMode == DEBUG_COLLISIONS ) {
// Collisions debug mode
color = materials[geoms[obj_index].materialid].color;
} else if ( debugMode == DEBUG_NORMALS ) {
color = 0.5f*intersection_normal + 0.5f;
//color = 0.5f*glm::vec3(0.0, 0.0, -1.0) + 0.5f;
//color = 0.5f*lightRay.direction + 0.5f;
}
}
//colors[index] = generateRandomNumberFromThread(resolution, time, x, y);
// Accumulate in image buffer
//colors[index] = color;
colors[index] += color;
}
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){
int traceDepth = 1; //determines how many bounces the raytracer traces
// set up launch configuration: 8x8-pixel tiles, one thread per pixel
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
//send image to GPU
glm::vec3* cudaimage = NULL;
cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
//package geometry and materials and send to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
material* cudamaterials = NULL;
cudaMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
cudaMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);
//package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
//int debugMode = DEBUG_COLLISIONS;
int debugMode = DEBUG_ALL;
//int debugMode = DEBUG_DIFFUSE;
//int debugMode = DEBUG_NORMALS;
//kernel launches
raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials, debugMode);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage, iterations);
//retrieve image from GPU
cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
//free up stuff, or else we'll leak memory like a madman
cudaFree( cudaimage );
cudaFree( cudageoms );
delete [] geomList;
// make certain the kernel has completed
cudaThreadSynchronize();
cudaError_t errorNum = cudaPeekAtLastError();
if ( errorNum != cudaSuccess ) {
printf ("Cuda error -- %s\n", cudaGetErrorString(errorNum));
}
checkCUDAError("Kernel failed!");
}
|
d75404c2246eb9433535b69b2e97efb39244a238.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from dgemm_tesla_T_T.cu normal d -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_sblas3
********************************************************************/
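// Each 16x4 thread block owns a 64x16 tile of C; thread idt = ty*16+tx accumulates one
// 16-element row of that tile in the Cb registers while B tiles are staged in shared memory.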
__global__ void
sgemm_kernel_T_T_64_16_16_16_4(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx + idt >= m )
A += ibx + 0;
else
A += ibx + idt;
C += __mul24(ibx + idt, ldc) + iby;
B += tx + __mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory
accesses in dimension N
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid
memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1 = 0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by
setting s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimension M, N that doesn't fit into blocks
*/
if ( (iby + 16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
C[15] = alpha * Cb[15] + beta * C[15];
break;
case 15:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
break;
case 14:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
break;
case 13:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
break;
case 12:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
break;
case 11:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
break;
case 10:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
break;
case 9:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
break;
case 8:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
break;
case 7:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
break;
case 6:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
break;
case 5:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
break;
case 4:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
break;
case 3:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
break;
case 2:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
break;
case 1:
C[0] = alpha * Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_T_T_64_16_16_16_4(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
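/* Each 16x4-thread block covers a 64 (m) x 16 (n) tile of C, so the grid is
ceil(m/64) x ceil(n/16) blocks. */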
hipLaunchKernelGGL(( sgemm_kernel_T_T_64_16_16_16_4), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
d75404c2246eb9433535b69b2e97efb39244a238.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from dgemm_tesla_T_T.cu normal d -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_sblas3
********************************************************************/
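/*
Thread layout (matching the 16x4 launch below): idt = ty*16 + tx picks one of
the 64 positions along m covered by this block, and each thread accumulates
the 16 values along n for that position in its Cb[] registers.
*/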
__global__ void
sgemm_kernel_T_T_64_16_16_16_4(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx + idt >= m )
A += ibx + 0;
else
A += ibx + idt;
C += __mul24(ibx + idt, ldc) + iby;
B += tx + __mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory
accesses in dimension N
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid
memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1 = 0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by
setting s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimension M, N that doesn't fit into blocks
*/
if ( (iby + 16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
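/* At this point lda holds the number of valid C columns for this row
(0 when the row index itself is past m), so the switch below stores only
the in-bounds elements. */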
switch(lda) {
case 16:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
C[15] = alpha * Cb[15] + beta * C[15];
break;
case 15:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
break;
case 14:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
break;
case 13:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
break;
case 12:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
break;
case 11:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
break;
case 10:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
break;
case 9:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
break;
case 8:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
break;
case 7:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
break;
case 6:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
break;
case 5:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
break;
case 4:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
break;
case 3:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
break;
case 2:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
break;
case 1:
C[0] = alpha * Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_T_T_64_16_16_16_4(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
sgemm_kernel_T_T_64_16_16_16_4<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
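/* Usage sketch (assumes dC, dA, dB are device pointers with leading
dimensions ldc/lda/ldb, and that magma_stream is MAGMA's current stream;
the default stream also works):
magmablas_sgemm_T_T_64_16_16_16_4( dC, dA, dB, m, n, k, lda, ldb, ldc, alpha, beta );
*/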
|
5a801d996efc2f6f7f1ce69a281e126d94068a9a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/all.h>
#include <torch/python.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
template <typename scalar_t>
__global__ void generate_hmap_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> np,
torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> hmap, float sigma){
const int n = blockIdx.x;
const int c = threadIdx.x;
const int nt = blockDim.x;
const int b = blockIdx.y;
const int mapsize = hmap.size(2) * hmap.size(3);
int i,x,y;
float e,dx,dy;
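// Each block handles one (keypoint n, batch b) pair; its threads stride over
// the H*W map and write exp(-(dx^2+dy^2)/(2*sigma^2)) within a 3-sigma window
// around the keypoint, but only if its third component (visibility) is > 0.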
if (np[b][n][2]>0){
for (i=c;i<mapsize;i=i+nt){
x = i%hmap.size(3);
y = i/hmap.size(3);
dx = float(x) - np[b][n][0];
dy = float(y) - np[b][n][1];
if (abs(dx)>3*sigma) {continue;}
if (abs(dy)>3*sigma) {continue;}
e = (dx * dx + dy * dy) / 2 / sigma / sigma;
hmap[b][n][y][x] = exp(-e);
}
}
}
torch::Tensor generate_hmap_cuda(torch::Tensor pts, int H, int W, float sigma){
// pts is [batch, num_points, 3]: (x, y, visibility) for each keypoint
const auto b = pts.size(0);
const auto np = pts.size(1);
auto hmap = torch::zeros({b, np, H, W}, pts.device());
const int threads = 1024;
const dim3 blocks(np, b);
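// Launch configuration: one block per (keypoint, batch) pair, with 1024
// threads cooperatively covering the H*W pixels of each heat map.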
AT_DISPATCH_FLOATING_TYPES(pts.type(), "generate_hmap_cuda", ([&] {
hipLaunchKernelGGL(( generate_hmap_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
pts.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
hmap.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
sigma);
}));
return hmap;
}
|
5a801d996efc2f6f7f1ce69a281e126d94068a9a.cu
|
#include <torch/all.h>
#include <torch/python.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
template <typename scalar_t>
__global__ void generate_hmap_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> np,
torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> hmap, float sigma){
const int n = blockIdx.x;
const int c = threadIdx.x;
const int nt = blockDim.x;
const int b = blockIdx.y;
const int mapsize = hmap.size(2) * hmap.size(3);
int i,x,y;
float e,dx,dy;
if (np[b][n][2]>0){
for (i=c;i<mapsize;i=i+nt){
x = i%hmap.size(3);
y = i/hmap.size(3);
dx = float(x) - np[b][n][0];
dy = float(y) - np[b][n][1];
if (abs(dx)>3*sigma) {continue;}
if (abs(dy)>3*sigma) {continue;}
e = (dx * dx + dy * dy) / 2 / sigma / sigma;
hmap[b][n][y][x] = exp(-e);
}
}
}
torch::Tensor generate_hmap_cuda(torch::Tensor pts, int H, int W, float sigma){
// pts is [batch, num_points, 3]: (x, y, visibility) for each keypoint
const auto b = pts.size(0);
const auto np = pts.size(1);
auto hmap = torch::zeros({b, np, H, W}, pts.device());
const int threads = 1024;
const dim3 blocks(np, b);
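// Launch configuration: one block per (keypoint, batch) pair, with 1024
// threads cooperatively covering the H*W pixels of each heat map.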
AT_DISPATCH_FLOATING_TYPES(pts.type(), "generate_hmap_cuda", ([&] {
generate_hmap_cuda_kernel<scalar_t><<<blocks, threads>>>(
pts.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
hmap.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
sigma);
}));
return hmap;
}
|
e5bab9355c493498d22726a8c598a3bcd0d04371.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ void hsv_rgb_single(float h, float s, float v, unsigned char *r, unsigned char *g, unsigned char *b)
{
// Adapted and simplified from https://github.com/jakebesworth/Simple-Color-Conversions
/* Convert hue back to 0-6 space, floor */
const float hex = h * 6;
const unsigned char primary = (int) hex;
const float secondary = hex - primary;
float x = (1.0 - s) * v;
float y = (1.0 - (s * secondary)) * v;
float z = (1.0 - (s * (1.0 - secondary))) * v;
float *rp, *gp, *bp;
switch(primary) {
case 0: rp = &v; gp = &z; bp = &x; break;
case 1: rp = &y; gp = &v; bp = &x; break;
case 2: rp = &x; gp = &v; bp = &z; break;
case 3: rp = &x; gp = &y; bp = &v; break;
case 4: rp = &z; gp = &x; bp = &v; break;
case 5:
default: rp = &v; gp = &x; bp = &y; break;
}
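// 'primary' is the 60-degree hue sector (0-5); the switch picks which of
// v (max), z (rising), y (falling) and x (min) feeds each RGB channel before
// the 0..1 values are scaled to 0..255 below.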
*r = *rp * 255.0;
*g = *gp * 255.0;
*b = *bp * 255.0;
}
__global__ void hsv_rgb(float *img, unsigned char *result, int width, int height)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < width && y < height) {
int idx = (x + y * width) * 3;
hsv_rgb_single(img[idx], img[idx + 1], img[idx + 2], &result[idx], &result[idx + 1], &result[idx + 2]);
}
}
|
e5bab9355c493498d22726a8c598a3bcd0d04371.cu
|
__device__ void hsv_rgb_single(float h, float s, float v, unsigned char *r, unsigned char *g, unsigned char *b)
{
// Adapted and simplified from https://github.com/jakebesworth/Simple-Color-Conversions
/* Convert hue back to 0-6 space, floor */
const float hex = h * 6;
const unsigned char primary = (int) hex;
const float secondary = hex - primary;
float x = (1.0 - s) * v;
float y = (1.0 - (s * secondary)) * v;
float z = (1.0 - (s * (1.0 - secondary))) * v;
float *rp, *gp, *bp;
switch(primary) {
case 0: rp = &v; gp = &z; bp = &x; break;
case 1: rp = &y; gp = &v; bp = &x; break;
case 2: rp = &x; gp = &v; bp = &z; break;
case 3: rp = &x; gp = &y; bp = &v; break;
case 4: rp = &z; gp = &x; bp = &v; break;
case 5:
default: rp = &v; gp = &x; bp = &y; break;
}
*r = *rp * 255.0;
*g = *gp * 255.0;
*b = *bp * 255.0;
}
__global__ void hsv_rgb(float *img, unsigned char *result, int width, int height)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < width && y < height) {
int idx = (x + y * width) * 3;
hsv_rgb_single(img[idx], img[idx + 1], img[idx + 2], &result[idx], &result[idx + 1], &result[idx + 2]);
}
}
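/* Launch sketch (assumptions: d_img and d_result are device buffers holding
width*height*3 elements, and the 16x16 block size is an arbitrary choice):
dim3 block(16, 16);
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
hsv_rgb<<<grid, block>>>(d_img, d_result, width, height);
*/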
|
c193e127eb96fd7e8c66b025c26da9d7503fab01.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include "magma.h"
// includes, cuda
// matrix indexing convention
#define id(n, m, ld) (((n) * (ld) + (m)))
#define checkmem(x) do { \
if (x == 0) { \
fprintf(stderr, "! host memory allocation error: T\n"); \
return EXIT_FAILURE; \
} \
} while (0);
// declarations
int gs_pca_cublas(int, int, int, double *, double *, double *);
int print_results(int, int, int, double *, double *, double *, double *);
// main
int main(int argc, char** argv) {
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
magma_init();
// initialize some random test data X
int N = 3, M = 3;
double *X = 0;
X = (double*)malloc(N * M * sizeof(double));
if(X == 0) {
fprintf (stderr, "! host memory allocation error: X\n"); return EXIT_FAILURE;
}
for(int n = 0; n < N; n++) {
for(int m = 0; m < M; m++) {
X[id(n,m,M)] = (double) (n * M + m); //rand() / (double)RAND_MAX;
/*printf("(%d, %d) ", n*M+m, id(n,m,M));*/
}
}
X[0] = 3;
X[1] = 3;
X[2] = 5;
X[3] = 6;
X[4] = 3;
X[5] = 6;
X[6] = 7;
X[7] = 7;
X[8] = 5;
double *dX = 0;
hipMalloc((void**)&dX, N * M * sizeof(double));
/*hipMemcpy(dX, X, N * M * sizeof(double), hipMemcpyHostToDevice);*/
hipblasSetMatrix (N, M, sizeof(double), X, N, dX, N);
double *wr = 0;// (double*) malloc(N * sizeof(double));
double *wi = 0;//(double*) malloc(N * sizeof(double));
double *V = 0;//(double*) malloc(N * N * sizeof(double));
int nb = magma_get_dgehrd_nb(N);
printf("nb: %d\n", nb);
int lwork = N*(2 + 2*nb);
double *work = 0;//(double*) malloc(lwork * sizeof(double));
magma_int_t info;
magma_malloc_cpu( (void**) &wr, (N)*sizeof(double) );
magma_malloc_cpu( (void**) &wi, (N)*sizeof(double) );
magma_malloc_cpu( (void**) &V, (N * N)*sizeof(double) );
magma_malloc_cpu( (void**) &work, (lwork)*sizeof(double) );
magma_dgeev_m(MagmaNoVec, MagmaVec, N,
X, N, wr, wi,
NULL, N, V, N,
work, lwork, &info);
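// magma_dgeev_m uses the CPU interface: X, wr, wi and V are host arrays.
// With jobvl=MagmaNoVec, jobvr=MagmaVec only the right eigenvectors are
// returned (column-major in V); wr/wi hold the real/imaginary eigenvalue parts.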
if (info != 0) {
printf("info != 0\n");
}
double *evs = (double*) malloc(N * sizeof(double));
hipMemcpy(evs, wr, N * sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%lf\n", wr[i]);
}
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%lf ", V[j*N + i]);
}
puts("");
}
/*for (int i = 0; i < M; i++) {*/
/*printf("%lf ", X[id(0, i, M)]);*/
/*}*/
/*puts("aa");*/
/*double *dY = 0;*/
/*double *Y = 0;*/
/*[>Y = (double*)malloc(M * sizeof(double));<]*/
/*cudaStat = hipMalloc((void**)&dY, M * sizeof(double));*/
/*puts("sadfddf");*/
/*if (cudaStat != hipSuccess) {*/
/*printf ("device memory allocation failed");*/
/*return EXIT_FAILURE;*/
/*}*/
/*puts("gg");*/
/*stat = hipblasCreate(&handle);*/
/*if (stat != HIPBLAS_STATUS_SUCCESS) { */
/*printf ("CUBLAS initialization failed\n");*/
/*return EXIT_FAILURE;*/
/*}*/
/*[>double *dY2 = 0<]*/
/*[>hipMalloc((void**)&dY2, (N-1) * (M-1) * sizeof(double));<]*/
/*[>cudaDcopy(handle, M)<]*/
/*fprintf(stderr, "ggg\n");*/
/*Y = (double*) malloc(M * sizeof(double));*/
/*[>hipblasDcopy(M, &dR[0], 1, dU, 1);<]*/
/*printf("before hipblasDcopy\n");*/
/*stat = hipblasDcopy(handle, M, dX, 1, dY, 1);*/
/*printf("done hipblasDcopy \n");*/
/*[>hipMemcpy(&Y[0], &dY[0], M * sizeof(double), hipMemcpyDeviceToHost);<]*/
/*hipblasGetMatrix (1, M, sizeof(double), dY, 1, Y, 1);*/
/*if(stat != HIPBLAS_STATUS_SUCCESS) {*/
/*printf("cublas fail 33\n");*/
/*}*/
/*for (int i = 0; i < M; i++) {*/
/*printf("%lf ",Y[i]);*/
/*}*/
/*puts("");*/
/*[>free(Y);<]*/
/*[>free(X);<]*/
}
|
c193e127eb96fd7e8c66b025c26da9d7503fab01.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include "magma.h"
// includes, cuda
// matrix indexing convention
#define id(n, m, ld) (((n) * (ld) + (m)))
#define checkmem(x) do { \
if (x == 0) { \
fprintf(stderr, "! host memory allocation error: T\n"); \
return EXIT_FAILURE; \
} \
} while (0);
// declarations
int gs_pca_cublas(int, int, int, double *, double *, double *);
int print_results(int, int, int, double *, double *, double *, double *);
// main
int main(int argc, char** argv) {
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
magma_init();
// initialize some random test data X
int N = 3, M = 3;
double *X = 0;
X = (double*)malloc(N * M * sizeof(double));
if(X == 0) {
fprintf (stderr, "! host memory allocation error: X\n"); return EXIT_FAILURE;
}
for(int n = 0; n < N; n++) {
for(int m = 0; m < M; m++) {
X[id(n,m,M)] = (double) (n * M + m); //rand() / (double)RAND_MAX;
/*printf("(%d, %d) ", n*M+m, id(n,m,M));*/
}
}
X[0] = 3;
X[1] = 3;
X[2] = 5;
X[3] = 6;
X[4] = 3;
X[5] = 6;
X[6] = 7;
X[7] = 7;
X[8] = 5;
double *dX = 0;
cudaMalloc((void**)&dX, N * M * sizeof(double));
/*cudaMemcpy(dX, X, N * M * sizeof(double), cudaMemcpyHostToDevice);*/
cublasSetMatrix (N, M, sizeof(double), X, N, dX, N);
double *wr = 0;// (double*) malloc(N * sizeof(double));
double *wi = 0;//(double*) malloc(N * sizeof(double));
double *V = 0;//(double*) malloc(N * N * sizeof(double));
int nb = magma_get_dgehrd_nb(N);
printf("nb: %d\n", nb);
int lwork = N*(2 + 2*nb);
double *work = 0;//(double*) malloc(lwork * sizeof(double));
magma_int_t info;
magma_malloc_cpu( (void**) &wr, (N)*sizeof(double) );
magma_malloc_cpu( (void**) &wi, (N)*sizeof(double) );
magma_malloc_cpu( (void**) &V, (N * N)*sizeof(double) );
magma_malloc_cpu( (void**) &work, (lwork)*sizeof(double) );
magma_dgeev_m(MagmaNoVec, MagmaVec, N,
X, N, wr, wi,
NULL, N, V, N,
work, lwork, &info);
if (info != 0) {
printf("info != 0\n");
}
double *evs = (double*) malloc(N * sizeof(double));
cudaMemcpy(evs, wr, N * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%lf\n", wr[i]);
}
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%lf ", V[j*N + i]);
}
puts("");
}
/*for (int i = 0; i < M; i++) {*/
/*printf("%lf ", X[id(0, i, M)]);*/
/*}*/
/*puts("aa");*/
/*double *dY = 0;*/
/*double *Y = 0;*/
/*[>Y = (double*)malloc(M * sizeof(double));<]*/
/*cudaStat = cudaMalloc((void**)&dY, M * sizeof(double));*/
/*puts("sadfddf");*/
/*if (cudaStat != cudaSuccess) {*/
/*printf ("device memory allocation failed");*/
/*return EXIT_FAILURE;*/
/*}*/
/*puts("gg");*/
/*stat = cublasCreate(&handle);*/
/*if (stat != CUBLAS_STATUS_SUCCESS) { */
/*printf ("CUBLAS initialization failed\n");*/
/*return EXIT_FAILURE;*/
/*}*/
/*[>double *dY2 = 0<]*/
/*[>cudaMalloc((void**)&dY2, (N-1) * (M-1) * sizeof(double));<]*/
/*[>cudaDcopy(handle, M)<]*/
/*fprintf(stderr, "ggg\n");*/
/*Y = (double*) malloc(M * sizeof(double));*/
/*[>cublasDcopy(M, &dR[0], 1, dU, 1);<]*/
/*printf("before cublasDcopy\n");*/
/*stat = cublasDcopy(handle, M, dX, 1, dY, 1);*/
/*printf("done cublasDcopy \n");*/
/*[>cudaMemcpy(&Y[0], &dY[0], M * sizeof(double), cudaMemcpyDeviceToHost);<]*/
/*cublasGetMatrix (1, M, sizeof(double), dY, 1, Y, 1);*/
/*if(stat != CUBLAS_STATUS_SUCCESS) {*/
/*printf("cublas fail 33\n");*/
/*}*/
/*for (int i = 0; i < M; i++) {*/
/*printf("%lf ",Y[i]);*/
/*}*/
/*puts("");*/
/*[>free(Y);<]*/
/*[>free(X);<]*/
}
|
d448f4d353ecdd0f8c146ca3ad024ce56b359c76.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - input, y - indices, z - output
template<typename X, typename Y>
__global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ int xRank, yRank, zRank, maxRank, yLastDim;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
maxRank = sd::math::nd4j_max<int>(yRank, sd::math::nd4j_max<int>(xRank, zRank));
zLen = shape::length(zShapeInfo);
yLastDim = yShapeInfo[yRank];
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coord = sharedMem + threadIdx.x * maxRank;
Nd4jLong *zCoordStart, *xCoordStart;
if(yLastDim == xRank) {
zCoordStart = coord;
xCoordStart = coord;
}
if(zRank >= xRank) {
zCoordStart = coord;
xCoordStart = coord + zRank - xRank;
}
else {
zCoordStart = coord + xRank - zRank;
xCoordStart = coord;
}
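// Each thread owns a maxRank-long slice of the dynamic shared memory for its
// coordinates; zCoordStart and xCoordStart are offset inside that slice so
// that the output (z) and input (x) coordinates share their trailing dims.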
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoordStart);
const auto zOffset = shape::getOffset(zShapeInfo, zCoordStart);
// last y coordinate
int coordToRestore;
if(yLastDim != xRank)
coordToRestore = static_cast<int>(zCoordStart[yRank - 1]);
zCoordStart[yRank - 1] = 0; // last y coordinate
const auto yOffset = shape::getOffset(yShapeInfo, zCoordStart);
//restore z coordinate
if(yLastDim != xRank)
zCoordStart[yRank - 1] = coordToRestore;
// construct coordinates for x
for(uint j = 0; j < yLastDim; ++j)
xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]]; // last stride
const auto xOffset = shape::getOffset(xShapeInfo, xCoordStart);
z[zOffset] = x[xOffset];
// printf("z[%lld] = x[%lld] = %f\n", zOffset, xOffset, (float) z[zOffset]);
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo) {
hipLaunchKernelGGL(( gatherNDCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void gatherND(sd::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {
const int maxRank = sd::math::nd4j_max<int>(indices.rankOf(), sd::math::nd4j_max<int>(input.rankOf(), output.rankOf()));
const int threadsPerBlock = 256;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = 8 * threadsPerBlock * maxRank + 128;
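// Dynamic shared memory: one 8-byte Nd4jLong coordinate entry per rank per
// thread, plus a small pad.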
const auto xType = input.dataType();
const auto yType = indices.dataType();
PointersManager manager(context, "gatherND");
NDArray::prepareSpecialUse({&output}, {&input, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&input, &indices});
manager.synchronize();
}
}
}
}
|
d448f4d353ecdd0f8c146ca3ad024ce56b359c76.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - input, y - indices, z - output
template<typename X, typename Y>
__global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ int xRank, yRank, zRank, maxRank, yLastDim;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
maxRank = sd::math::nd4j_max<int>(yRank, sd::math::nd4j_max<int>(xRank, zRank));
zLen = shape::length(zShapeInfo);
yLastDim = yShapeInfo[yRank];
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coord = sharedMem + threadIdx.x * maxRank;
Nd4jLong *zCoordStart, *xCoordStart;
if(yLastDim == xRank) {
zCoordStart = coord;
xCoordStart = coord;
}
if(zRank >= xRank) {
zCoordStart = coord;
xCoordStart = coord + zRank - xRank;
}
else {
zCoordStart = coord + xRank - zRank;
xCoordStart = coord;
}
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoordStart);
const auto zOffset = shape::getOffset(zShapeInfo, zCoordStart);
// last y coordinate
int coordToRestore;
if(yLastDim != xRank)
coordToRestore = static_cast<int>(zCoordStart[yRank - 1]);
zCoordStart[yRank - 1] = 0; // last y coordinate
const auto yOffset = shape::getOffset(yShapeInfo, zCoordStart);
//restore z coordinate
if(yLastDim != xRank)
zCoordStart[yRank - 1] = coordToRestore;
// construct coordinates for x
for(uint j = 0; j < yLastDim; ++j)
xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]]; // last stride
const auto xOffset = shape::getOffset(xShapeInfo, xCoordStart);
z[zOffset] = x[xOffset];
// printf("z[%lld] = x[%lld] = %f\n", zOffset, xOffset, (float) z[zOffset]);
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo) {
gatherNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void gatherND(sd::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {
const int maxRank = sd::math::nd4j_max<int>(indices.rankOf(), sd::math::nd4j_max<int>(input.rankOf(), output.rankOf()));
const int threadsPerBlock = 256;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = 8 * threadsPerBlock * maxRank + 128;
const auto xType = input.dataType();
const auto yType = indices.dataType();
PointersManager manager(context, "gatherND");
NDArray::prepareSpecialUse({&output}, {&input, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&input, &indices});
manager.synchronize();
}
}
}
}
|
be1f7f550db84c9aa427e218211d1d4f7ebcfd95.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Find high divergence points of a vector field
// --- Input: 1. normalized 3D vector field
//
// dFx dFy dFz
// divergence = ----- + ----- + -----
// dx dy dz
//
// --- Output: highest ...% divergence point list
// --- Author: Nicu D. Cornea, Vizlab, Rutgers University
// --- Date: Wed Aug 20 17:53:56 EDT 2003
//
#include "HighDiverg.h"
// #define TRACE
#define SEARCH_GRID 1
#define CELL_SIZE 1.00 / SEARCH_GRID
#define MAX_NUM_HDPTS 5000
typedef struct Lock
{
int *mutex;
Lock()
{
int state=0; // 0 = unlocked
hipMalloc((void **)&mutex,sizeof(int));
hipMemcpy(mutex,&state,sizeof(int),hipMemcpyHostToDevice); // start unlocked; initializing to 1 would make lock() spin forever
}
~Lock()
{
hipFree(mutex);
}
__device__ void lock()
{
while(atomicCAS(mutex, 0, 1) != 0);
}
__device__ void unlock()
{
atomicExch(mutex, 0);
}
}Lock;
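// Simple device-side spinlock: the host-side constructor/destructor manage
// the mutex in device memory, while lock()/unlock() spin on it from the
// kernel with atomicCAS/atomicExch.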
typedef struct {
int* Points;
int numPoints;
} HDGroup;
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts);
__device__ __host__ inline Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec);
__global__ void max_min_divergence(unsigned char *flags,Vector *ForceField,double *maxDiv,double *minDiv,bool inOut,int slsz,int L,int M, int N,double vdist, Lock mylock)
{
double x, y, z;
int k=blockIdx.x;
int j=blockIdx.y;
int i=blockIdx.z;
double div;
int idx=k*slsz + j*L +i;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
for(int kk=0; kk < SEARCH_GRID; kk++) {
for(int jj=0; jj < SEARCH_GRID; jj++) {
for(int ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
// interpolate force vectors around the point
Vector v_0 = interpolation(x + vdist, y, z, L, M, N, ForceField);
Vector v_1 = interpolation(x - vdist, y, z, L, M, N, ForceField);
Vector v_2 = interpolation(x, y + vdist, z, L, M, N, ForceField);
Vector v_3 = interpolation(x, y - vdist, z, L, M, N, ForceField);
Vector v_4 = interpolation(x, y, z + vdist, L, M, N, ForceField);
Vector v_5 = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v_0.xd - v_1.xd) + (v_2.yd - v_3.yd) + (v_4.zd - v_5.zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
mylock.lock();
if(div > *maxDiv)
{
*maxDiv = div;
}
if(div < *minDiv)
{
*minDiv = div;
}
mylock.unlock();
}
}
}
}
// double GetDiv(double x, double y, double z);
bool GetHighDivergencePoints(
Vector* ForceField, // [in] vector field
int L, int M, int N, // [in] size of vector field (X, Y and Z)
unsigned char *flags, // [in] flags array
float perc, // [in] percentage of high div. points
// to be returned (top <perc> %)
VoxelPositionDouble **HDPts, // [out] high divergence point list
int *numHDPts, // [out] number of points in the list
bool inOut // [in] flag specifying if we should look
// outside the object too (if true).
// DEFAULT: false
) {
#ifdef TRACE
printf("TRACE: Starting GetHighDivergencePoints function. Cellsize = %lf\n", CELL_SIZE);
#endif
(*HDPts) = NULL;
(*numHDPts) = 0;
if(perc == 0) {
return true;
}
long idx, slsz;
int i,j,k, ii, jj, kk, s;
double x, y, z;
long cntz, cntnz;
slsz = L*M; // slice size
double adiv[MAX_NUM_HDPTS]; // divergence array
if(((*HDPts) = new VoxelPositionDouble[MAX_NUM_HDPTS]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
// calculate divergence throughout the dataset
double maxDiv = -999999.99;
double minDiv = 999999.99;
double div;
cntz = 0;
cntnz = 0;
double zerodiv = 0.1;
/////////////////////////////////////
Vector v[6];
double vdist = (CELL_SIZE) / 2.00;
#ifdef TRACE
printf("vdist = %lf\n", vdist);
#endif
printf("Finding high divergence points (1).\n");
unsigned char *d_flags;
Vector *d_ForceField;
double *d_maxDiv;
double *d_minDiv;
Lock mylock;
int *p=(int *)malloc(sizeof(int));
hipMalloc((void **)&d_flags,sizeof(unsigned char)*L*M*N);
hipMalloc((void **)&d_ForceField,sizeof(Vector)*L*M*N);
hipMalloc((void **)&d_minDiv,sizeof(double));
hipMalloc((void **)&d_maxDiv,sizeof(double));
hipMemcpy(d_flags,flags,sizeof(unsigned char)*L*M*N,hipMemcpyHostToDevice);
hipMemcpy(d_ForceField,ForceField,sizeof(Vector)*L*M*N,hipMemcpyHostToDevice);
hipMemcpy(d_maxDiv,&maxDiv,sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_minDiv,&minDiv,sizeof(double),hipMemcpyHostToDevice);
dim3 dimBlock(1);
dim3 dimGrid(N,M,L);
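// One single-thread block per voxel (grid = N x M x L); the global min/max
// divergence updates are serialized through the device-side lock.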
hipLaunchKernelGGL(( max_min_divergence), dim3(dimGrid),dim3(dimBlock), 0, 0, d_flags,d_ForceField,d_maxDiv,d_minDiv,inOut,slsz,L,M,N,vdist, mylock);
hipMemcpy(&maxDiv,d_maxDiv,sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(&minDiv,d_minDiv,sizeof(double),hipMemcpyDeviceToHost);
// for (k = 1; k < N-1; k++) {
// printf("\tProcessing plane %d out of %d\r", k, N-2);
// fflush(stdout);
// for (j = 1; j < M-1; j++) {
// for (i = 1; i < L-1; i++) {
// idx = k*slsz + j*L +i;
// // if we are not looking outside the object too.
// if(!inOut) {
// // - if this point is EXTERIOR, BOUNDARY or SURF, skip it
// if( (flags[idx] == EXTERIOR) ||
// (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// else {
// // we look for high divergence points outside the object too
// // ignore only boundary points.
// if( (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// for(kk=0; kk < SEARCH_GRID; kk++) {
// for(jj=0; jj < SEARCH_GRID; jj++) {
// for(ii=0; ii < SEARCH_GRID; ii++) {
// x = i + (ii * CELL_SIZE);
// y = j + (jj * CELL_SIZE);
// z = k + (kk * CELL_SIZE);
// #ifdef TRACE
// // printf("At point: (%lf, %lf, %lf)\n", x, y, z);
// #endif
// // interpolate force vectors arround the point
// v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
// v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
// v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
// v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
// v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
// v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
// div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
// if((div > -zerodiv) && (div < zerodiv)) {
// cntz++;
// }
// else {
// cntnz++;
// }
// #ifdef TRACE
// /*
// printf("Forces:\n");
// for(s = 0; s < 6; s++) {
// printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
// }
// printf("Div = %lf\n", div);
// */
// #endif
// if(div > maxDiv) {
// maxDiv = div;
// }
// if(div < minDiv) {
// minDiv = div;
// }div;
// }
// }
// }
// }
// }
// }
// #ifdef _DEBUG
printf("Divergence: max = %lf, min = %lf\n", maxDiv, minDiv);
// #endif
double threshold;
// case 1:
// take <perc> percent of the lowest negative value:
// !! have to change the comparison
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv + threshold;
/*
// case 2:
// take <perc> percent of the highest positive value:
// !! have to change the comparison
// NOT GOOD
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = maxDiv - threshold;
*/
/*
// case 3:
// take <perc> percent of the lowest value (must be negative):
// !! have to change the comparison
// NOT GOOD
threshold = minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv - threshold;
*/
#ifdef _DEBUG
printf("Threshold set to: %lf\n", threshold);
printf("Number of close to 0 divergence points [-%lf..%lf]: %ld. \n \
Number of non 0 divergence points: %ld.\n",
zerodiv, zerodiv, cntz, cntnz);
#endif
printf("Finding high divergence points (2).\n");
for (k = 1; k < N-1; k++) {
printf("\tProcessing plane %d out of %d\r", k, N-2);
fflush(stdout);
for (j = 1; j < M-1; j++) {
for (i = 1; i < L-1; i++) {
idx = k*slsz + j*L +i;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
for(kk=0; kk < SEARCH_GRID; kk++) {
for(jj=0; jj < SEARCH_GRID; jj++) {
for(ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
// interpolate force vectors around the point
v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
if(div <= threshold) {
// add the point to the HD list
(*HDPts)[(*numHDPts)].x = x;
(*HDPts)[(*numHDPts)].y = y;
(*HDPts)[(*numHDPts)].z = z;
adiv[(*numHDPts)] = div;
(*numHDPts) = (*numHDPts) + 1;
if((*numHDPts) >= MAX_NUM_HDPTS) {
printf("UPS! Too many high divergence points detected. \
Reached maximum of %d. Abort\n", MAX_NUM_HDPTS);
exit(1);
}
}
}
}
}
}
}
}
//
// sort the points on the divergence value;
//
double minval, tmp;
int minpos;
for(i=0; i < (*numHDPts); i++) {
minval = adiv[i];
minpos = i;
for(j=i+1; j < (*numHDPts); j++) {
if(adiv[j] < minval) {
minval = adiv[j];
minpos = j;
}
}
if(minpos != i) {
// exchange points and div values
tmp = adiv[i];
adiv[i] = adiv[minpos];
adiv[minpos] = tmp;
tmp = (*HDPts)[i].x; (*HDPts)[i].x = (*HDPts)[minpos].x; (*HDPts)[minpos].x = tmp;
tmp = (*HDPts)[i].y; (*HDPts)[i].y = (*HDPts)[minpos].y; (*HDPts)[minpos].y = tmp;
tmp = (*HDPts)[i].z; (*HDPts)[i].z = (*HDPts)[minpos].z; (*HDPts)[minpos].z = tmp;
}
}
#ifdef TRACE
printf("Points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
//
// cluster the points
//
// Algorithm:
// First point creates the first group.
// For all the other points:
// If the point is close to an existing group
// add the point to that group
// else
// the point starts a new group
// endif
// endfor
// end
//
// initialize data structure
HDGroup *Groups;
int numGroups = 0;
if((Groups = new HDGroup[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
for(i=0; i < (*numHDPts); i++) {
if((Groups[i].Points = new int[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
Groups[i].numPoints = 0;
}
bool closeToSomeGroup = false;
// first point creates the first group
Groups[0].Points[0] = 0;
Groups[0].numPoints = 1;
numGroups = 1;
for(i=1; i < (*numHDPts); i++) {
closeToSomeGroup = false;
for(j=0; j < numGroups; j++) {
if(PointIsCloseToGroup(i, j, Groups, HDPts)) {
// add the point to that group
Groups[j].Points[Groups[j].numPoints] = i;
Groups[j].numPoints = Groups[j].numPoints + 1;
closeToSomeGroup = true;
break;
}
}
if(!closeToSomeGroup) {
// start a new group
Groups[numGroups].Points[0] = i;
Groups[numGroups].numPoints = 1;
numGroups++;
}
}
#ifdef TRACE
// print the clustered points:
printf("Clustered points:\n");
for(i=0; i < numGroups; i++) {
printf("%f %f %f\n",
(*HDPts)[Groups[i].Points[0]].x, (*HDPts)[Groups[i].Points[0]].y, (*HDPts)[Groups[i].Points[0]].z);
for(j=1; j < Groups[i].numPoints; j++) {
printf("\t%f %f %f\n",
(*HDPts)[Groups[i].Points[j]].x, (*HDPts)[Groups[i].Points[j]].y, (*HDPts)[Groups[i].Points[j]].z);
}
}
#endif
//
// Return only the first point in each group as the high divergence points
//
VoxelPositionDouble* newHDPts;
if((newHDPts = new VoxelPositionDouble[numGroups]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
for(i=0; i < numGroups; i++) {
newHDPts[i].x = (*HDPts)[Groups[i].Points[0]].x;
newHDPts[i].y = (*HDPts)[Groups[i].Points[0]].y;
newHDPts[i].z = (*HDPts)[Groups[i].Points[0]].z;
}
// delete the old array
delete [] (*HDPts);
// delete Group data structure
for(i=0; i < numGroups; i++) {
delete [] Groups[i].Points;
}
delete [] Groups;
// return the new array
(*HDPts) = newHDPts;
(*numHDPts) = numGroups;
#ifdef TRACE
printf("Returning points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
return true;
}
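// Trilinear interpolation of the force field at the fractional position
// (x, y, z): alpha/beta/gamma are the offsets inside the enclosing voxel and
// the eight corner vectors are blended component by component.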
__device__ __host__ Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec)
{
float alpha, beta, gamma;
Vector forceInt;
long slsz;
alpha=x-int(x);
beta=y-int(y);
gamma=z-int(z);
slsz=sizy*sizx;
forceInt.xd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*(alpha*beta*gamma);
forceInt.yd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*gamma;
forceInt.zd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*gamma;
return(forceInt);
}
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts) {
int i;
for(i=0; i < Groups[grp].numPoints; i++) {
if(
(fabs((*HDPts)[pt].x - (*HDPts)[Groups[grp].Points[i]].x) <= 1) &&
(fabs((*HDPts)[pt].y - (*HDPts)[Groups[grp].Points[i]].y) <= 1) &&
(fabs((*HDPts)[pt].z - (*HDPts)[Groups[grp].Points[i]].z) <= 1))
{
return true;
}
}
return false;
}
|
be1f7f550db84c9aa427e218211d1d4f7ebcfd95.cu
|
// Find high divergence points of a vector field
// --- Input: 1. normalized 3D vector field
//
// dFx dFy dFz
// divergence = ----- + ----- + -----
// dx dy dz
//
// --- Output: highest ...% divergence point list
// --- Author: Nicu D. Cornea, Vizlab, Rutgers University
// --- Date: Wed Aug 20 17:53:56 EDT 2003
//
#include "HighDiverg.h"
// #define TRACE
#define SEARCH_GRID 1
#define CELL_SIZE 1.00 / SEARCH_GRID
#define MAX_NUM_HDPTS 5000
typedef struct Lock
{
int *mutex;
Lock()
{
int state=0; // 0 = unlocked
cudaMalloc((void **)&mutex,sizeof(int));
cudaMemcpy(mutex,&state,sizeof(int),cudaMemcpyHostToDevice); // start unlocked; initializing to 1 would make lock() spin forever
}
~Lock()
{
cudaFree(mutex);
}
__device__ void lock()
{
while(atomicCAS(mutex, 0, 1) != 0);
}
__device__ void unlock()
{
atomicExch(mutex, 0);
}
}Lock;
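// Simple device-side spinlock: the host-side constructor/destructor manage
// the mutex in device memory, while lock()/unlock() spin on it from the
// kernel with atomicCAS/atomicExch.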
typedef struct {
int* Points;
int numPoints;
} HDGroup;
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts);
__device__ __host__ inline Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec);
__global__ void max_min_divergence(unsigned char *flags,Vector *ForceField,double *maxDiv,double *minDiv,bool inOut,int slsz,int L,int M, int N,double vdist, Lock mylock)
{
double x, y, z;
int k=blockIdx.x;
int j=blockIdx.y;
int i=blockIdx.z;
double div;
int idx=k*slsz + j*L +i;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
for(int kk=0; kk < SEARCH_GRID; kk++) {
for(int jj=0; jj < SEARCH_GRID; jj++) {
for(int ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
// interpolate force vectors around the point
Vector v_0 = interpolation(x + vdist, y, z, L, M, N, ForceField);
Vector v_1 = interpolation(x - vdist, y, z, L, M, N, ForceField);
Vector v_2 = interpolation(x, y + vdist, z, L, M, N, ForceField);
Vector v_3 = interpolation(x, y - vdist, z, L, M, N, ForceField);
Vector v_4 = interpolation(x, y, z + vdist, L, M, N, ForceField);
Vector v_5 = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v_0.xd - v_1.xd) + (v_2.yd - v_3.yd) + (v_4.zd - v_5.zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
mylock.lock();
if(div > *maxDiv)
{
*maxDiv = div;
}
if(div < *minDiv)
{
*minDiv = div;
}
mylock.unlock();
}
}
}
}
// double GetDiv(double x, double y, double z);
bool GetHighDivergencePoints(
Vector* ForceField, // [in] vector field
int L, int M, int N, // [in] size of vector field (X, Y and Z)
unsigned char *flags, // [in] flags array
float perc, // [in] percentage of high div. points
// to be returned (top <perc> %)
VoxelPositionDouble **HDPts, // [out] high divergence point list
int *numHDPts, // [out] number of points in the list
bool inOut // [in] flag specifying if we should look
// outside the object too (if true).
// DEFAULT: false
) {
#ifdef TRACE
printf("TRACE: Starting GetHighDivergencePoints function. Cellsize = %lf\n", CELL_SIZE);
#endif
(*HDPts) = NULL;
(*numHDPts) = 0;
if(perc == 0) {
return true;
}
long idx, slsz;
int i,j,k, ii, jj, kk, s;
double x, y, z;
long cntz, cntnz;
slsz = L*M; // slice size
double adiv[MAX_NUM_HDPTS]; // divergence array
if(((*HDPts) = new VoxelPositionDouble[MAX_NUM_HDPTS]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
// calculate divergence throughout the dataset
double maxDiv = -999999.99;
double minDiv = 999999.99;
double div;
cntz = 0;
cntnz = 0;
double zerodiv = 0.1;
/////////////////////////////////////
Vector v[6];
double vdist = (CELL_SIZE) / 2.00;
#ifdef TRACE
printf("vdist = %lf\n", vdist);
#endif
printf("Finding high divergence points (1).\n");
unsigned char *d_flags;
Vector *d_ForceField;
double *d_maxDiv;
double *d_minDiv;
Lock mylock;
int *p=(int *)malloc(sizeof(int));
cudaMalloc((void **)&d_flags,sizeof(unsigned char)*L*M*N);
cudaMalloc((void **)&d_ForceField,sizeof(Vector)*L*M*N);
cudaMalloc((void **)&d_minDiv,sizeof(double));
cudaMalloc((void **)&d_maxDiv,sizeof(double));
cudaMemcpy(d_flags,flags,sizeof(unsigned char)*L*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_ForceField,ForceField,sizeof(Vector)*L*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_maxDiv,&maxDiv,sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_minDiv,&minDiv,sizeof(double),cudaMemcpyHostToDevice);
dim3 dimBlock(1);
dim3 dimGrid(N,M,L);
max_min_divergence<<<dimGrid,dimBlock>>>(d_flags,d_ForceField,d_maxDiv,d_minDiv,inOut,slsz,L,M,N,vdist, mylock);
cudaMemcpy(&maxDiv,d_maxDiv,sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(&minDiv,d_minDiv,sizeof(double),cudaMemcpyDeviceToHost);
// for (k = 1; k < N-1; k++) {
// printf("\tProcessing plane %d out of %d\r", k, N-2);
// fflush(stdout);
// for (j = 1; j < M-1; j++) {
// for (i = 1; i < L-1; i++) {
// idx = k*slsz + j*L +i;
// // if we are not looking outside the object too.
// if(!inOut) {
// // - if this point is EXTERIOR, BOUNDARY or SURF, skip it
// if( (flags[idx] == EXTERIOR) ||
// (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// else {
// // we look for high divergence points outside the object too
// // ignore only boundary points.
// if( (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// for(kk=0; kk < SEARCH_GRID; kk++) {
// for(jj=0; jj < SEARCH_GRID; jj++) {
// for(ii=0; ii < SEARCH_GRID; ii++) {
// x = i + (ii * CELL_SIZE);
// y = j + (jj * CELL_SIZE);
// z = k + (kk * CELL_SIZE);
// #ifdef TRACE
// // printf("At point: (%lf, %lf, %lf)\n", x, y, z);
// #endif
// // interpolate force vectors arround the point
// v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
// v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
// v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
// v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
// v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
// v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
// div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
// if((div > -zerodiv) && (div < zerodiv)) {
// cntz++;
// }
// else {
// cntnz++;
// }
// #ifdef TRACE
// /*
// printf("Forces:\n");
// for(s = 0; s < 6; s++) {
// printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
// }
// printf("Div = %lf\n", div);
// */
// #endif
// if(div > maxDiv) {
// maxDiv = div;
// }
// if(div < minDiv) {
// minDiv = div;
// }div;
// }
// }
// }
// }
// }
// }
// #ifdef _DEBUG
printf("Divergence: max = %lf, min = %lf\n", maxDiv, minDiv);
// #endif
double threshold;
// case 1:
// take <perc> percent of the lowest negative value:
// !! have to change the comparison
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv + threshold;
/*
// case 2:
// take <perc> percent of the highest positive value:
// !! have to change the comparison
// NOT GOOD
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = maxDiv - threshold;
*/
/*
// case 3:
// take <perc> percent of the lowest value (must be negative):
// !! have to change the comparison
// NOT GOOD
threshold = minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv - threshold;
*/
#ifdef _DEBUG
printf("Threshold set to: %lf\n", threshold);
printf("Number of close to 0 divergence points [-%lf..%lf]: %ld. \n \
Number of non 0 divergence points: %ld.\n",
zerodiv, zerodiv, cntz, cntnz);
#endif
printf("Finding high divergence points (2).\n");
for (k = 1; k < N-1; k++) {
printf("\tProcessing plane %d out of %d\r", k, N-2);
fflush(stdout);
for (j = 1; j < M-1; j++) {
for (i = 1; i < L-1; i++) {
idx = k*slsz + j*L +i;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
for(kk=0; kk < SEARCH_GRID; kk++) {
for(jj=0; jj < SEARCH_GRID; jj++) {
for(ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
// interpolate force vectors around the point
v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
if(div <= threshold) {
// add the point to the HD list
(*HDPts)[(*numHDPts)].x = x;
(*HDPts)[(*numHDPts)].y = y;
(*HDPts)[(*numHDPts)].z = z;
adiv[(*numHDPts)] = div;
(*numHDPts) = (*numHDPts) + 1;
if((*numHDPts) >= MAX_NUM_HDPTS) {
printf("UPS! Too many high divergence points detected. \
Reached maximum of %d. Abort\n", MAX_NUM_HDPTS);
exit(1);
}
}
}
}
}
}
}
}
//
// sort the points on the divergence value;
//
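// (simple selection sort, ascending, so the most negative divergence comes first)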
double minval, tmp;
int minpos;
for(i=0; i < (*numHDPts); i++) {
minval = adiv[i];
minpos = i;
for(j=i+1; j < (*numHDPts); j++) {
if(adiv[j] < minval) {
minval = adiv[j];
minpos = j;
}
}
if(minpos != i) {
// exchange points and div values
tmp = adiv[i];
adiv[i] = adiv[minpos];
adiv[minpos] = tmp;
tmp = (*HDPts)[i].x; (*HDPts)[i].x = (*HDPts)[minpos].x; (*HDPts)[minpos].x = tmp;
tmp = (*HDPts)[i].y; (*HDPts)[i].y = (*HDPts)[minpos].y; (*HDPts)[minpos].y = tmp;
tmp = (*HDPts)[i].z; (*HDPts)[i].z = (*HDPts)[minpos].z; (*HDPts)[minpos].z = tmp;
}
}
#ifdef TRACE
printf("Points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
//
// cluster the points
//
// Algorithm:
// First point creates the first group.
// For all the other points:
// If the point is close to an existing group
// add the point to that group
// else
// the point starts a new group
// endif
// endfor
// end
//
// initialize data structure
HDGroup *Groups;
int numGroups = 0;
if((Groups = new HDGroup[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
for(i=0; i < (*numHDPts); i++) {
if((Groups[i].Points = new int[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
Groups[i].numPoints = 0;
}
bool closeToSomeGroup = false;
// first point creates the first group
Groups[0].Points[0] = 0;
Groups[0].numPoints = 1;
numGroups = 1;
for(i=1; i < (*numHDPts); i++) {
closeToSomeGroup = false;
for(j=0; j < numGroups; j++) {
if(PointIsCloseToGroup(i, j, Groups, HDPts)) {
// add the point to that group
Groups[j].Points[Groups[j].numPoints] = i;
Groups[j].numPoints = Groups[j].numPoints + 1;
closeToSomeGroup = true;
break;
}
}
if(!closeToSomeGroup) {
// start a new group
Groups[numGroups].Points[0] = i;
Groups[numGroups].numPoints = 1;
numGroups++;
}
}
#ifdef TRACE
// print the clustered points:
printf("Clustered points:\n");
for(i=0; i < numGroups; i++) {
printf("%f %f %f\n",
(*HDPts)[Groups[i].Points[0]].x, (*HDPts)[Groups[i].Points[0]].y, (*HDPts)[Groups[i].Points[0]].z);
for(j=1; j < Groups[i].numPoints; j++) {
printf("\t%f %f %f\n",
(*HDPts)[Groups[i].Points[j]].x, (*HDPts)[Groups[i].Points[j]].y, (*HDPts)[Groups[i].Points[j]].z);
}
}
#endif
//
// Return only the first point in each group as the high divergence points
//
VoxelPositionDouble* newHDPts;
if((newHDPts = new VoxelPositionDouble[numGroups]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
for(i=0; i < numGroups; i++) {
newHDPts[i].x = (*HDPts)[Groups[i].Points[0]].x;
newHDPts[i].y = (*HDPts)[Groups[i].Points[0]].y;
newHDPts[i].z = (*HDPts)[Groups[i].Points[0]].z;
}
// delete the old array
delete [] (*HDPts);
// delete Group data structure
for(i=0; i < numGroups; i++) {
delete [] Groups[i].Points;
}
delete [] Groups;
// return the new array
(*HDPts) = newHDPts;
(*numHDPts) = numGroups;
#ifdef TRACE
printf("Returning points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
return true;
}
__device__ __host__ Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec)
{
double alpha, beta, gamma;
Vector forceInt;
long slsz;
alpha=x-int(x);
beta=y-int(y);
gamma=z-int(z);
slsz=sizy*sizx;
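// alpha/beta/gamma are the fractional offsets of (x,y,z) inside its voxel;
// the result is the trilinear blend of the force vectors at the 8 surrounding lattice points.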
forceInt.xd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*(alpha*beta*gamma);
forceInt.yd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*gamma;
forceInt.zd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*gamma;
return(forceInt);
}
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts) {
int i;
for(i=0; i < Groups[grp].numPoints; i++) {
if(
(fabs((*HDPts)[pt].x - (*HDPts)[Groups[grp].Points[i]].x) <= 1) &&
(fabs((*HDPts)[pt].y - (*HDPts)[Groups[grp].Points[i]].y) <= 1) &&
(fabs((*HDPts)[pt].z - (*HDPts)[Groups[grp].Points[i]].z) <= 1))
{
return true;
}
}
return false;
}
|
c61ff93c86289b5024b5348133f86ca429a73785.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <inttypes.h>
#include <parboil.h>
#include <hip/hip_runtime.h>
#include "sad.h"
#include "sad4.h"
#include "largerBlocks.h"
#include "file.h"
#include "image.h"
#define CUDA_ERRCK \
{hipError_t err = hipGetLastError(); \
if (err) fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(err)); \
}
static unsigned short *
load_sads(char *filename);
static void
write_sads(char *filename,
int image_width_macroblocks,
int image_height_macroblocks,
unsigned short *sads);
static void
write_sads_directly(char *filename,
int width,
int height,
unsigned short *sads);
/* FILE I/O */
unsigned short *
load_sads(char *filename)
{
FILE *infile;
unsigned short *sads;
int w;
int h;
int sads_per_block;
infile = fopen(filename, "r");
if (!infile)
{
fprintf(stderr, "Cannot find file '%s'\n", filename);
exit(-1);
}
/* Read image dimensions (measured in macroblocks) */
w = read16u(infile);
h = read16u(infile);
/* Read SAD values. Only interested in the 4x4 SAD values, which are
* at the end of the file. */
sads_per_block = MAX_POS_PADDED * (w * h);
fseek(infile, 25 * sads_per_block * sizeof(unsigned short), SEEK_CUR);
sads = (unsigned short *)malloc(sads_per_block * 16 * sizeof(unsigned short));
fread(sads, sizeof(unsigned short), sads_per_block * 16, infile);
fclose(infile);
return sads;
}
/* Compare the computed SADs against the reference (expected) SADs.
*/
void
check_sads(unsigned short *sads_reference,
unsigned short *sads_computed,
int image_size_macroblocks)
{
int block;
/* Check the 4x4 SAD values. These are in sads_reference.
* Ignore the data at the beginning of sads_computed. */
sads_computed += 25 * MAX_POS_PADDED * image_size_macroblocks;
for (block = 0; block < image_size_macroblocks; block++)
{
int subblock;
for (subblock = 0; subblock < 16; subblock++)
{
int sad_index;
for (sad_index = 0; sad_index < MAX_POS; sad_index++)
{
int index =
(block * 16 + subblock) * MAX_POS_PADDED + sad_index;
if (sads_reference[index] != sads_computed[index])
{
#if 0
/* Print exactly where the mismatch was seen */
printf("M %3d %2d %4d (%d = %d)\n", block, subblock, sad_index, sads_reference[index], sads_computed[index]);
#else
goto mismatch;
#endif
}
}
}
}
printf("Success.\n");
return;
mismatch:
printf("Computed SADs do not match expected values.\n");
}
/* Extract the SAD data for a particular block type for a particular
* macroblock from the array of SADs of that block type. */
static inline void
write_subblocks(FILE *outfile, unsigned short *subblock_array, int macroblock,
int count)
{
int block;
int pos;
for (block = 0; block < count; block++)
{
unsigned short *vec = subblock_array +
(block + macroblock * count) * MAX_POS_PADDED;
/* Write all SADs for this sub-block */
for (pos = 0; pos < MAX_POS; pos++)
write16u(outfile, *vec++);
}
}
/* Write some SAD data to a file for output checking.
*
* All SAD values for six rows of macroblocks are written.
* The six rows consist of the top two, middle two, and bottom two image rows.
*/
void
write_sads(char *filename,
int mb_width,
int mb_height,
unsigned short *sads)
{
FILE *outfile = fopen(filename, "w");
int mbs = mb_width * mb_height;
int row_indir;
int row_indices[6] = {0, 1,
mb_height / 2 - 1, mb_height / 2,
mb_height - 2, mb_height - 1};
if (outfile == NULL)
{
fprintf(stderr, "Cannot open output file\n");
exit(-1);
}
/* Write the number of output macroblocks */
write32u(outfile, mb_width * 6);
/* Write zeros */
write32u(outfile, 0);
/* Each row */
for (row_indir = 0; row_indir < 6; row_indir++)
{
int row = row_indices[row_indir];
/* Each block in row */
int block;
for (block = mb_width * row; block < mb_width * (row + 1); block++)
{
int blocktype;
/* Write SADs for all sub-block types */
for (blocktype = 1; blocktype <= 7; blocktype++)
write_subblocks(outfile,
sads + SAD_TYPE_IX(blocktype, mbs),
block,
SAD_TYPE_CT(blocktype));
}
}
fclose(outfile);
}
/* FILE I/O for debugging */
static void
write_sads_directly(char *filename,
int width,
int height,
unsigned short *sads)
{
FILE *f = fopen(filename, "w");
int n;
write16u(f, width);
write16u(f, height);
for (n = 0; n < 41 * MAX_POS_PADDED * (width * height); n++) {
write16u(f, sads[n]);
}
fclose(f);
}
static void
print_test_sad_vector(unsigned short *base, int macroblock, int count)
{
int n;
int searchpos = 17*33+17;
for (n = 0; n < count; n++)
printf(" %d", base[(count * macroblock + n) * MAX_POS_PADDED + searchpos]);
}
static void
print_test_sads(unsigned short *sads_computed,
int mbs)
{
int macroblock = 5;
int blocktype;
for (blocktype = 1; blocktype <= 7; blocktype++)
{
printf("%d:", blocktype);
print_test_sad_vector(sads_computed + SAD_TYPE_IX(blocktype, mbs),
macroblock, SAD_TYPE_CT(blocktype));
puts("\n");
}
}
/* MAIN */
int
main(int argc, char **argv)
{
struct image_i16 *ref_image;
struct image_i16 *cur_image;
unsigned short *sads_computed; /* SADs generated by the program */
int image_size_bytes;
int image_width_macroblocks, image_height_macroblocks;
int image_size_macroblocks;
struct pb_TimerSet timers;
struct pb_Parameters *params;
pb_InitializeTimerSet(&timers);
params = pb_ReadParameters(&argc, argv);
if (pb_Parameters_CountInputs(params) != 2)
{
fprintf(stderr, "Expecting two input filenames\n");
exit(-1);
}
/* Read input files */
pb_SwitchToTimer(&timers, pb_TimerID_IO);
ref_image = load_image(params->inpFiles[0]);
cur_image = load_image(params->inpFiles[1]);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if ((ref_image->width != cur_image->width) ||
(ref_image->height != cur_image->height))
{
fprintf(stderr, "Input images must be the same size\n");
exit(-1);
}
if ((ref_image->width % 16) || (ref_image->height % 16))
{
fprintf(stderr, "Input image size must be an integral multiple of 16\n");
exit(-1);
}
/* Compute parameters, allocate memory */
image_size_bytes = ref_image->width * ref_image->height * sizeof(short);
image_width_macroblocks = ref_image->width >> 4;
image_height_macroblocks = ref_image->height >> 4;
image_size_macroblocks = image_width_macroblocks * image_height_macroblocks;
sads_computed = (unsigned short *)
malloc(41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(short));
/* Run the kernel code */
{
struct hipArray *ref_ary; /* Reference image on the device */
short *d_cur_image; /* Current image on the device */
unsigned short *d_sads; /* SADs on the device */
dim3 macroblock_grid(image_width_macroblocks, image_height_macroblocks);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMalloc((void **)&d_cur_image, image_size_bytes);
CUDA_ERRCK
hipMallocArray(&ref_ary, &get_ref().channelDesc,
ref_image->width, ref_image->height);
CUDA_ERRCK
/* Transfer current image to device */
hipMemcpy(d_cur_image, cur_image->data, image_size_bytes,
hipMemcpyHostToDevice);
CUDA_ERRCK
/* Transfer reference image to device */
hipMemcpy2DToArray(ref_ary,
0, 0,
ref_image->data,
ref_image->width * sizeof(unsigned short),
ref_image->width * sizeof(unsigned short),
ref_image->height,
hipMemcpyHostToDevice);
CUDA_ERRCK
hipBindTextureToArray(get_ref(), ref_ary);
CUDA_ERRCK
/* Allocate SAD data on the device */
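/* 41 SAD vectors per macroblock: one per sub-block over the seven partition
 * sizes (1x 16x16 + 2x 16x8 + 2x 8x16 + 4x 8x8 + 8x 8x4 + 8x 4x8 + 16x 4x4),
 * each padded to MAX_POS_PADDED search positions. */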
hipMalloc((void **)&d_sads, 41 * MAX_POS_PADDED * image_size_macroblocks *
sizeof(unsigned short));
CUDA_ERRCK
hipMemset(d_sads, 0, 41 * MAX_POS_PADDED * image_size_macroblocks *
sizeof(unsigned short));
CUDA_ERRCK
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
// Run the 4x4 kernel
hipLaunchKernelGGL(( mb_sad_calc), dim3(CEIL(ref_image->width / 4, THREADS_W),
CEIL(ref_image->height / 4, THREADS_H)),
dim3(dim3(CEIL(MAX_POS, POS_PER_THREAD) * THREADS_W * THREADS_H)),
SAD_LOC_SIZE_BYTES, 0,
d_sads,
(unsigned short *)d_cur_image,
image_width_macroblocks,
image_height_macroblocks);
CUDA_ERRCK
// Run the larger-blocks kernels
#ifdef ENABLE_CURD
allocateReadWriteSets(macroblock_grid, dim3(32, 4));
#endif
hipLaunchKernelGGL(( larger_sad_calc_8), dim3(macroblock_grid), dim3(dim3(32, 4)), 0, 0,
d_sads,
image_width_macroblocks,
image_height_macroblocks);
#ifdef ENABLE_CURD
freeReadWriteSets(macroblock_grid, dim3(32, 4));
#endif
CUDA_ERRCK
#ifdef ENABLE_CURD
allocateReadWriteSets(macroblock_grid, dim3(32, 1));
#endif
hipLaunchKernelGGL(( larger_sad_calc_16), dim3(macroblock_grid), dim3(dim3(32, 1)), 0, 0,
d_sads,
image_width_macroblocks,
image_height_macroblocks);
#ifdef ENABLE_CURD
freeReadWriteSets(macroblock_grid, dim3(32, 1));
#endif
CUDA_ERRCK
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
/* Transfer SAD data to the host */
hipMemcpy(sads_computed,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
d_sads,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)
,
hipMemcpyDeviceToHost);
CUDA_ERRCK
/* Free GPU memory */
hipFree(d_sads);
CUDA_ERRCK
hipUnbindTexture(get_ref());
CUDA_ERRCK
hipFreeArray(ref_ary);
CUDA_ERRCK
hipFree(d_cur_image);
CUDA_ERRCK
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
/* Print output */
if (params->outFile)
{
pb_SwitchToTimer(&timers, pb_TimerID_IO);
write_sads(params->outFile,
image_width_macroblocks,
image_height_macroblocks,
sads_computed);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
#if 0 /* Debugging */
print_test_sads(sads_computed, image_size_macroblocks);
write_sads_directly("sad-debug.bin",
ref_image->width / 16, ref_image->height / 16,
sads_computed);
#endif
/* Free memory */
free(sads_computed);
free_image(ref_image);
free_image(cur_image);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(params);
return 0;
}
|
c61ff93c86289b5024b5348133f86ca429a73785.cu
|
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <inttypes.h>
#include <parboil.h>
#include <cuda.h>
#include "sad.h"
#include "sad4.h"
#include "largerBlocks.h"
#include "file.h"
#include "image.h"
#define CUDA_ERRCK \
{cudaError_t err = cudaGetLastError(); \
if (err) fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err)); \
}
static unsigned short *
load_sads(char *filename);
static void
write_sads(char *filename,
int image_width_macroblocks,
int image_height_macroblocks,
unsigned short *sads);
static void
write_sads_directly(char *filename,
int width,
int height,
unsigned short *sads);
/* FILE I/O */
unsigned short *
load_sads(char *filename)
{
FILE *infile;
unsigned short *sads;
int w;
int h;
int sads_per_block;
infile = fopen(filename, "r");
if (!infile)
{
fprintf(stderr, "Cannot find file '%s'\n", filename);
exit(-1);
}
/* Read image dimensions (measured in macroblocks) */
w = read16u(infile);
h = read16u(infile);
/* Read SAD values. Only interested in the 4x4 SAD values, which are
* at the end of the file. */
sads_per_block = MAX_POS_PADDED * (w * h);
fseek(infile, 25 * sads_per_block * sizeof(unsigned short), SEEK_CUR);
sads = (unsigned short *)malloc(sads_per_block * 16 * sizeof(unsigned short));
fread(sads, sizeof(unsigned short), sads_per_block * 16, infile);
fclose(infile);
return sads;
}
/* Compare the computed SADs against the reference (expected) SADs.
*/
void
check_sads(unsigned short *sads_reference,
unsigned short *sads_computed,
int image_size_macroblocks)
{
int block;
/* Check the 4x4 SAD values. These are in sads_reference.
* Ignore the data at the beginning of sads_computed. */
sads_computed += 25 * MAX_POS_PADDED * image_size_macroblocks;
for (block = 0; block < image_size_macroblocks; block++)
{
int subblock;
for (subblock = 0; subblock < 16; subblock++)
{
int sad_index;
for (sad_index = 0; sad_index < MAX_POS; sad_index++)
{
int index =
(block * 16 + subblock) * MAX_POS_PADDED + sad_index;
if (sads_reference[index] != sads_computed[index])
{
#if 0
/* Print exactly where the mismatch was seen */
printf("M %3d %2d %4d (%d = %d)\n", block, subblock, sad_index, sads_reference[index], sads_computed[index]);
#else
goto mismatch;
#endif
}
}
}
}
printf("Success.\n");
return;
mismatch:
printf("Computed SADs do not match expected values.\n");
}
/* Extract the SAD data for a particular block type for a particular
* macroblock from the array of SADs of that block type. */
static inline void
write_subblocks(FILE *outfile, unsigned short *subblock_array, int macroblock,
int count)
{
int block;
int pos;
for (block = 0; block < count; block++)
{
unsigned short *vec = subblock_array +
(block + macroblock * count) * MAX_POS_PADDED;
/* Write all SADs for this sub-block */
for (pos = 0; pos < MAX_POS; pos++)
write16u(outfile, *vec++);
}
}
/* Write some SAD data to a file for output checking.
*
* All SAD values for six rows of macroblocks are written.
* The six rows consist of the top two, middle two, and bottom two image rows.
*/
void
write_sads(char *filename,
int mb_width,
int mb_height,
unsigned short *sads)
{
FILE *outfile = fopen(filename, "w");
int mbs = mb_width * mb_height;
int row_indir;
int row_indices[6] = {0, 1,
mb_height / 2 - 1, mb_height / 2,
mb_height - 2, mb_height - 1};
if (outfile == NULL)
{
fprintf(stderr, "Cannot open output file\n");
exit(-1);
}
/* Write the number of output macroblocks */
write32u(outfile, mb_width * 6);
/* Write zeros */
write32u(outfile, 0);
/* Each row */
for (row_indir = 0; row_indir < 6; row_indir++)
{
int row = row_indices[row_indir];
/* Each block in row */
int block;
for (block = mb_width * row; block < mb_width * (row + 1); block++)
{
int blocktype;
/* Write SADs for all sub-block types */
for (blocktype = 1; blocktype <= 7; blocktype++)
write_subblocks(outfile,
sads + SAD_TYPE_IX(blocktype, mbs),
block,
SAD_TYPE_CT(blocktype));
}
}
fclose(outfile);
}
/* FILE I/O for debugging */
static void
write_sads_directly(char *filename,
int width,
int height,
unsigned short *sads)
{
FILE *f = fopen(filename, "w");
int n;
write16u(f, width);
write16u(f, height);
for (n = 0; n < 41 * MAX_POS_PADDED * (width * height); n++) {
write16u(f, sads[n]);
}
fclose(f);
}
static void
print_test_sad_vector(unsigned short *base, int macroblock, int count)
{
int n;
int searchpos = 17*33+17;
for (n = 0; n < count; n++)
printf(" %d", base[(count * macroblock + n) * MAX_POS_PADDED + searchpos]);
}
static void
print_test_sads(unsigned short *sads_computed,
int mbs)
{
int macroblock = 5;
int blocktype;
for (blocktype = 1; blocktype <= 7; blocktype++)
{
printf("%d:", blocktype);
print_test_sad_vector(sads_computed + SAD_TYPE_IX(blocktype, mbs),
macroblock, SAD_TYPE_CT(blocktype));
puts("\n");
}
}
/* MAIN */
int
main(int argc, char **argv)
{
struct image_i16 *ref_image;
struct image_i16 *cur_image;
unsigned short *sads_computed; /* SADs generated by the program */
int image_size_bytes;
int image_width_macroblocks, image_height_macroblocks;
int image_size_macroblocks;
struct pb_TimerSet timers;
struct pb_Parameters *params;
pb_InitializeTimerSet(&timers);
params = pb_ReadParameters(&argc, argv);
if (pb_Parameters_CountInputs(params) != 2)
{
fprintf(stderr, "Expecting two input filenames\n");
exit(-1);
}
/* Read input files */
pb_SwitchToTimer(&timers, pb_TimerID_IO);
ref_image = load_image(params->inpFiles[0]);
cur_image = load_image(params->inpFiles[1]);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if ((ref_image->width != cur_image->width) ||
(ref_image->height != cur_image->height))
{
fprintf(stderr, "Input images must be the same size\n");
exit(-1);
}
if ((ref_image->width % 16) || (ref_image->height % 16))
{
fprintf(stderr, "Input image size must be an integral multiple of 16\n");
exit(-1);
}
/* Compute parameters, allocate memory */
image_size_bytes = ref_image->width * ref_image->height * sizeof(short);
image_width_macroblocks = ref_image->width >> 4;
image_height_macroblocks = ref_image->height >> 4;
image_size_macroblocks = image_width_macroblocks * image_height_macroblocks;
sads_computed = (unsigned short *)
malloc(41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(short));
/* Run the kernel code */
{
struct cudaArray *ref_ary; /* Reference image on the device */
short *d_cur_image; /* Current image on the device */
unsigned short *d_sads; /* SADs on the device */
dim3 macroblock_grid(image_width_macroblocks, image_height_macroblocks);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMalloc((void **)&d_cur_image, image_size_bytes);
CUDA_ERRCK
cudaMallocArray(&ref_ary, &get_ref().channelDesc,
ref_image->width, ref_image->height);
CUDA_ERRCK
/* Transfer current image to device */
cudaMemcpy(d_cur_image, cur_image->data, image_size_bytes,
cudaMemcpyHostToDevice);
CUDA_ERRCK
/* Transfer reference image to device */
cudaMemcpy2DToArray(ref_ary,
0, 0,
ref_image->data,
ref_image->width * sizeof(unsigned short),
ref_image->width * sizeof(unsigned short),
ref_image->height,
cudaMemcpyHostToDevice);
CUDA_ERRCK
cudaBindTextureToArray(get_ref(), ref_ary);
CUDA_ERRCK
/* Allocate SAD data on the device */
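/* 41 SAD vectors per macroblock: one per sub-block over the seven partition
 * sizes (1x 16x16 + 2x 16x8 + 2x 8x16 + 4x 8x8 + 8x 8x4 + 8x 4x8 + 16x 4x4),
 * each padded to MAX_POS_PADDED search positions. */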
cudaMalloc((void **)&d_sads, 41 * MAX_POS_PADDED * image_size_macroblocks *
sizeof(unsigned short));
CUDA_ERRCK
cudaMemset(d_sads, 0, 41 * MAX_POS_PADDED * image_size_macroblocks *
sizeof(unsigned short));
CUDA_ERRCK
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
// Run the 4x4 kernel
mb_sad_calc<<<dim3(CEIL(ref_image->width / 4, THREADS_W),
CEIL(ref_image->height / 4, THREADS_H)),
dim3(CEIL(MAX_POS, POS_PER_THREAD) * THREADS_W * THREADS_H),
SAD_LOC_SIZE_BYTES>>>
(d_sads,
(unsigned short *)d_cur_image,
image_width_macroblocks,
image_height_macroblocks);
CUDA_ERRCK
// Run the larger-blocks kernels
#ifdef ENABLE_CURD
allocateReadWriteSets(macroblock_grid, dim3(32, 4));
#endif
larger_sad_calc_8<<<macroblock_grid, dim3(32, 4)>>>
(d_sads,
image_width_macroblocks,
image_height_macroblocks);
#ifdef ENABLE_CURD
freeReadWriteSets(macroblock_grid, dim3(32, 4));
#endif
CUDA_ERRCK
#ifdef ENABLE_CURD
allocateReadWriteSets(macroblock_grid, dim3(32, 1));
#endif
larger_sad_calc_16<<<macroblock_grid, dim3(32, 1)>>>
(d_sads,
image_width_macroblocks,
image_height_macroblocks);
#ifdef ENABLE_CURD
freeReadWriteSets(macroblock_grid, dim3(32, 1));
#endif
CUDA_ERRCK
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
/* Transfer SAD data to the host */
cudaMemcpy(sads_computed,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
d_sads,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)
,
cudaMemcpyDeviceToHost);
CUDA_ERRCK
/* Free GPU memory */
cudaFree(d_sads);
CUDA_ERRCK
cudaUnbindTexture(get_ref());
CUDA_ERRCK
cudaFreeArray(ref_ary);
CUDA_ERRCK
cudaFree(d_cur_image);
CUDA_ERRCK
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
/* Print output */
if (params->outFile)
{
pb_SwitchToTimer(&timers, pb_TimerID_IO);
write_sads(params->outFile,
image_width_macroblocks,
image_height_macroblocks,
sads_computed);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
#if 0 /* Debugging */
print_test_sads(sads_computed, image_size_macroblocks);
write_sads_directly("sad-debug.bin",
ref_image->width / 16, ref_image->height / 16,
sads_computed);
#endif
/* Free memory */
free(sads_computed);
free_image(ref_image);
free_image(cur_image);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(params);
return 0;
}
|
2d0d079b0cadfb92160374e30606cbb4a4418aad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstring>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <unistd.h>
// NOTE: Need to compile in C++11 mode, add -std=c++11
// These should eventually be specifiable from R
#define TAU 1
#define V_THRESH 1.5
#define THREADS_PER_BLOCK 512
// Integrated Postsynaptic Kernel
__host__ __device__
double ipostkern(double dt) {
if (dt < 0) {
return(0);
}
return(TAU * (1 - exp(-dt / TAU)));
}
// Postsynaptic Kernel
__host__ __device__
double postkern(double dt) {
if (dt < 0) {
return(0);
}
return(exp(-dt / TAU));
}
// Derivative of the postsynaptic kernel
__host__ __device__
double dpostkern(double dt) {
if (dt < 0) {
return(0);
}
return((-1.0) / TAU * exp(-dt / TAU));
}
// Integrated refractory kernel.
__host__ __device__
double iprekern(double dt) {
if (dt < 0) {
return(0);
}
return(-V_THRESH);
}
// The inner product function, uses the standard R^n inner product.
__host__ __device__
double inner_prod(double *x, double *y, int n) {
double sum = 0;
for (int i = 0; i < n; i++) {
sum += x[i] * y[i];
}
return(sum);
}
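// Kernel: each thread simulates one neuron of layer l over all t_steps, accumulating its
// integrated postsynaptic output into ALPHA, applying the refractory term, and recording a
// spike time (plus, for the output layer, the GAMMA/GAMMAd state snapshot) whenever the
// potential exceeds V_THRESH.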
__global__
void par_c_main_loop(double ***ALPHA, double ***Fcal, int **f_count, double ***Ws, int* net_shape, int n_layers,
int t_steps, double t_eps, int l, double ****GAMMA, double ****GAMMAd, const bool copy_gamma) {
double t;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int n = index; n < net_shape[l]; n += stride) {
t = 0;
for (int ti = 0; ti < t_steps; ti++) {
// Calculate total postsynaptic contribution
int n_f = f_count[l][n];
double psc = 0;
for (int tfi = 0; tfi < n_f; tfi++) {
double tf = Fcal[l][n][tfi];
psc += ipostkern(t - tf);
}
ALPHA[l][ti][n] = psc;
if (l > 0) {
// Update refractory contribution
n_f = f_count[l][n];
double refr = 0;
for (int tfi = 0; tfi < n_f; tfi++) {
double tf = Fcal[l][n][tfi];
refr += iprekern(t - tf);
}
// Update potential
double V_n = inner_prod(Ws[l-1][n], ALPHA[l-1][ti], net_shape[l-1]) + refr;
//printf("l = %d, n = %d, ti = %d", l, n, ti);
//printf("Vsl = %d, n = %d, ti = %d", l, n, ti);
// Check for firing neurons
if (V_n > V_THRESH) {
// If an output fire, record the neural state
if (copy_gamma && l == n_layers-1) {
for (int l1 = 0; l1 < n_layers; l1++) {
for (int h = 0; h < net_shape[l1]; h++) {
GAMMA[n][f_count[l][n]][l1][h] = 0;
GAMMAd[n][f_count[l][n]][l1][h] = 0;
for (int ti = 0; ti < f_count[l1][h]; ti++) {
double tf = Fcal[l1][h][ti];
GAMMA[n][f_count[l][n]][l1][h] += postkern(t + t_eps - tf);
GAMMAd[n][f_count[l][n]][l1][h] += dpostkern(t + t_eps - tf);
}
}
}
}
Fcal[l][n][f_count[l][n]] = t + t_eps;
f_count[l][n]++;
}
}
t += t_eps;
}
}
}
// The main simulation, using armadillo for matrix multiplication, and organized in such a way that we solve a sequence of embarrassingly parallelizable problems.
double **par_sim_body_c(int *net_shape, const int n_layers,
double **Fin, int *f_count_in, long long int **f_max, double ***Ws,
int** f_count, const int t_steps, const double t_eps, double ****GAMMA, double ****GAMMAd, const int debug, const bool copy_gamma) {
// Get the layer with the most neurons
int max_neur = 0;
for (int l = 0; l < n_layers; l++) {
if (max_neur < net_shape[l]) {
max_neur = net_shape[l];
}
}
// ALPHA stores integrated postsynaptic potential in column major order.
// OMEGA stores integrated refractory contribution in row major order.
//double ***ALPHA = (double ***)calloc(n_layers, sizeof(double**));
//double ***OMEGA = (double ***)calloc(n_layers-1, sizeof(double**));
double ***ALPHA, ***OMEGA;
hipMallocManaged(&ALPHA, n_layers * sizeof(double**));
hipMallocManaged(&OMEGA, (n_layers-1) * sizeof(double**));
for (int i = 0; i < n_layers; i++) {
double **ALPHAi;
hipMallocManaged(&ALPHAi, t_steps * sizeof(double*));
ALPHA[i] = ALPHAi;
//ALPHA[i] = (double **) calloc(t_steps, sizeof(double*));
for (int j = 0; j < t_steps; j++) {
double *ALPHAij;
hipMallocManaged(&ALPHAij, net_shape[i] * sizeof(double));
ALPHA[i][j] = ALPHAij;
//ALPHA[i][j] = (double *) calloc(net_shape[i], sizeof(double));
}
if (i > 0) {
double **OMEGAi;
hipMallocManaged(&OMEGAi, net_shape[i] * sizeof(double*));
OMEGA[i-1] = OMEGAi;
//OMEGA[i-1] = (double **) calloc(net_shape[i], sizeof(double*));
for (int j = 0; j < net_shape[i]; j++) {
double *OMEGAij;
hipMallocManaged(&OMEGAij, t_steps * sizeof(double));
OMEGA[i-1][j] = OMEGAij;
//OMEGA[i-1][j] = (double *) calloc(t_steps, sizeof(double));
}
}
}
if (debug >= 1)
printf("After ALPHA\n");
// Storage for firing times
//double ***u_Fcal = (double ***)calloc(n_layers, sizeof(double**));
double ***u_Fcal;
hipMallocManaged(&u_Fcal, n_layers * sizeof(double**));
// Copy input spike times to unified memory.
double **u_Fin;
hipMallocManaged(&u_Fin, net_shape[0] * sizeof(double*));
for (int n = 0; n < net_shape[0]; n++) {
double *u_Finn;
hipMallocManaged(&u_Finn, f_count_in[n] * sizeof(double));
hipMemcpy(u_Finn, Fin[n], f_count_in[n] * sizeof(double), hipMemcpyDefault);
u_Fin[n] = u_Finn;
}
if (debug >= 1)
printf("After inputs \n");
//int **myarr = (int **)malloc(2*sizeof(int *));
//myarr[0] = (int **)malloc(2*sizeof(int));
//myarr[1] = (int **)malloc(2*sizeof(int));
//myarr[0][0] = 0;
//myarr[0][1] = 1;
//myarr[1][0] = 2;
//myarr[1][1] = 3;
//int **d_myarr;
//hipMallocManaged(&d_myarr, 2*sizeof(int *));
//hipMemcpy(d_myarr, myarr, 2*sizeof(int *), hipMemcpyDefault);
int **u_f_count;
hipMallocManaged(&u_f_count, n_layers * sizeof(int *));
int *u_f_count_in;
hipMallocManaged(&u_f_count_in, net_shape[0] * sizeof(int));
hipMemcpy(u_f_count_in, f_count_in, net_shape[0] * sizeof(int), hipMemcpyDefault);
//f_count[0] = u_f_count_in;
hipMemcpy(&u_f_count[0], &u_f_count_in, sizeof(int *), hipMemcpyDefault);
u_Fcal[0] = u_Fin;
for (int l = 0; l < n_layers-1; l++) {
//double **Fi = (double **) calloc(net_shape[l+1], sizeof(double *));
double **Fi;
hipMallocManaged(&Fi, net_shape[l+1] * sizeof(double *));
u_Fcal[l+1] = Fi;
//double **Fi = (double **) calloc(net_shape[l+1], sizeof(double *));
int *f_countl;
hipMallocManaged(&f_countl, net_shape[l+1] * sizeof(int));
hipMemcpy(&u_f_count[l+1], &f_countl, sizeof(int *), hipMemcpyDefault);
for (int n = 0; n < net_shape[l+1]; n++) {
// Init fire counts at 0.
u_f_count[l+1][n] = 0;
double *Fln;
//printf("Number A\n");
//printf("%d\n", f_max[l+1][n]);
//printf("Number Z\n");
hipMallocManaged(&Fln, f_max[l+1][n] * sizeof(double));
//printf("Number B\n");
Fi[n] = Fln;
//printf("Number C\n");
// Initialize storage to -1, so any negative firing time means the neuron did not fire.
for (int f = 0; f < f_max[l+1][n]; f++) {
Fi[n][f] = -1;
}
}
}
if (debug >= 1)
printf("After Fi copy\n");
//// Convert Connection weights to a C array
//// Ws[i] is the ith layer, Ws[i][j] is the jth row of layer i,
//// Ws[i][j][k] is the j,k element of layer i (row major ordering).
//double ***Ws_c = (double***)calloc(net_size-1, sizeof(double**));
//for (int l = 0; l < net_size-1; l++) {
// Ws_c[l] = (double**)calloc(net_shape[l], sizeof(double*));
// for (int n = 0; n < net_shape[l]; n++) {
// Ws_c[l][n] = Ws_in + wlo[l] + net_shape[l+1] * n;
// }
//}
// Do GAMMA(d)
// d_GAMMA[on][fi][l][h] gives the instantaneous postsynaptic current of neuron h of layer l at firing time fi of output neuron on.
double ****d_GAMMA, ****d_GAMMAd;
if (copy_gamma) {
// d_GAMMA/d_GAMMAd are indexed by the output neuron first, so allocate one slot per output neuron.
hipMallocManaged(&d_GAMMA, net_shape[n_layers-1] * sizeof(double***));
hipMallocManaged(&d_GAMMAd, net_shape[n_layers-1] * sizeof(double***));
for (int on = 0; on < net_shape[n_layers-1]; on++) {
hipMallocManaged(&d_GAMMA[on], f_max[n_layers-1][on] * sizeof(double **));
hipMallocManaged(&d_GAMMAd[on], f_max[n_layers-1][on] * sizeof(double **));
for (int fi = 0; fi < f_max[n_layers-1][on]; fi++) {
hipMallocManaged(&d_GAMMA[on][fi], n_layers * sizeof(double*));
hipMallocManaged(&d_GAMMAd[on][fi], n_layers * sizeof(double*));
for (int l = 0; l < n_layers; l++) {
hipMallocManaged(&d_GAMMA[on][fi][l], net_shape[l] * sizeof(double));
hipMallocManaged(&d_GAMMAd[on][fi][l], net_shape[l] * sizeof(double));
for (int h = 0; h < net_shape[l]; h++) {
d_GAMMA[on][fi][l][h] = -1;
d_GAMMAd[on][fi][l][h] = -1;
}
}
}
}
if (debug >= 1)
printf("Initted GAMMA storage \n");
}
// Copy weights to unified memory
double ***u_Ws;
hipMallocManaged(&u_Ws, (n_layers-1) * sizeof(double**));
for (int l = 0; l < n_layers-1; l++) {
double **u_Wsl;
hipMallocManaged(&u_Wsl, (net_shape[l+1]) * sizeof(double*));
u_Ws[l] = u_Wsl;
for (int n = 0; n < net_shape[l+1]; n++) {
double *u_Wsln;
hipMallocManaged(&u_Wsln, net_shape[l] * sizeof(double));
hipMemcpy(u_Wsln, Ws[l][n], net_shape[l] * sizeof(double), hipMemcpyDefault);
u_Ws[l][n] = u_Wsln;
}
}
if (debug >= 1)
printf("After Weights copy\n");
// Copy network shape to unified memory
int *u_net_shape;
hipMallocManaged(&u_net_shape, n_layers * sizeof(int));
hipMemcpy(u_net_shape, net_shape, n_layers * sizeof(int), hipMemcpyDefault);
// Run actual inference
//TODO: Should just be + 1
int n_blocks = max_neur / THREADS_PER_BLOCK + 1;
// Main Loop
for (int l = 0; l < n_layers; l++) {
if (debug >= 1)
printf(" Solving Layer %d...\n", l);
hipLaunchKernelGGL(( par_c_main_loop), dim3(n_blocks), dim3(THREADS_PER_BLOCK), 0, 0, ALPHA, u_Fcal, u_f_count, u_Ws, u_net_shape, n_layers,
t_steps, t_eps, l, d_GAMMA, d_GAMMAd, copy_gamma);
hipDeviceSynchronize();
}
if (debug >= 1)
printf("After main loop\n");
// Clean up
for (int i = 0; i < n_layers; i++) {
for (int j = 0; j < t_steps; j++) {
hipFree(ALPHA[i][j]);
}
hipFree(ALPHA[i]);
if (i > 0) {
for (int j = 0; j < net_shape[i]; j++) {
hipFree(OMEGA[i-1][j]);
}
hipFree(OMEGA[i-1]);
}
}
hipFree(ALPHA);
hipFree(OMEGA);
if (debug >= 1)
printf("After Free\n");
// Copy Fcal to host memory
//double ***Fcal = (double ***)malloc(n_layers * sizeof(double **));
//for (int l = 0; l < n_layers; l++) {
// Fcal[l] = (double **)malloc(net_shape[l] * sizeof(double *));
// for (int n = 0; n < net_shape[l]; n++) {
// Fcal[l][n] = (double *)malloc(f_max[l][n] * sizeof(double));
// hipMemcpy(Fcal[l][n], u_Fcal[l][n], f_max[l][n] * sizeof(double), hipMemcpyDefault);
// }
//}
// Copy output spikes to host memory
double **Fout = (double **)malloc(net_shape[n_layers-1]*sizeof(double*));
for (int n = 0; n < net_shape[n_layers-1]; n++) {
Fout[n] = (double *)malloc(f_max[n_layers-1][n] * sizeof(double));
hipMemcpy(Fout[n], u_Fcal[n_layers-1][n], f_max[n_layers-1][n] * sizeof(double), hipMemcpyDefault);
}
// Copy f_count to host memory
for (int l = 0; l < n_layers; l++) {
f_count[l] = (int *)calloc(net_shape[l], sizeof(int));
hipMemcpy(f_count[l], u_f_count[l], net_shape[l] * sizeof(int), hipMemcpyDefault);
}
if (debug >= 1)
printf("After output spike spike/f_count\n");
// Copy to host memory
// d_GAMMA[on][fi][l][h] gives the instantaneous postsynaptic current of neuron h of layer l at firing time fi of output neuron on.
//GAMMA = (double****)malloc((n_layers-1) * sizeof(double***));
//GAMMAd = (double****)malloc((n_layers-1) * sizeof(double***));
if (copy_gamma) {
for (int on = 0; on < net_shape[n_layers-1]; on++) {
GAMMA[on] = (double***)malloc(f_max[n_layers-1][on] * sizeof(double**));
GAMMAd[on] = (double***)malloc(f_max[n_layers-1][on] * sizeof(double**));
for (int fi = 0; fi < f_max[n_layers-1][on]; fi++) {
GAMMA[on][fi] = (double**)malloc(n_layers * sizeof(double*));
GAMMAd[on][fi] = (double**)malloc(n_layers * sizeof(double*));
for (int l = 0; l < n_layers; l++) {
GAMMA[on][fi][l] = (double*)malloc(net_shape[l] * sizeof(double));
GAMMAd[on][fi][l] = (double*)malloc(net_shape[l] * sizeof(double));
hipMemcpy(GAMMA[on][fi][l], d_GAMMA[on][fi][l], net_shape[l] * sizeof(double), hipMemcpyDefault);
hipMemcpy(GAMMAd[on][fi][l], d_GAMMAd[on][fi][l], net_shape[l] * sizeof(double), hipMemcpyDefault);
}
}
}
if (debug >= 1)
printf("After GAMMA copy\n");
}
//TODO: copy f_count
return(Fout);
}
|
2d0d079b0cadfb92160374e30606cbb4a4418aad.cu
|
#include <iostream>
#include <cstring>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <unistd.h>
// NOTE: Need to compile in C++11 mode, add -std=c++11
// These should eventually be specifiable from R
#define TAU 1
#define V_THRESH 1.5
#define THREADS_PER_BLOCK 512
// Integrated Postsynaptic Kernel
__host__ __device__
double ipostkern(double dt) {
if (dt < 0) {
return(0);
}
return(TAU * (1 - exp(-dt / TAU)));
}
// Postsynaptic Kernel
__host__ __device__
double postkern(double dt) {
if (dt < 0) {
return(0);
}
return(exp(-dt / TAU));
}
// Derivative of the postsynaptic kernel
__host__ __device__
double dpostkern(double dt) {
if (dt < 0) {
return(0);
}
return((-1.0) / TAU * exp(-dt / TAU));
}
// Integrated refractory kernel.
__host__ __device__
double iprekern(double dt) {
if (dt < 0) {
return(0);
}
return(-V_THRESH);
}
// The inner product function, uses the standard R^n inner product.
__host__ __device__
double inner_prod(double *x, double *y, int n) {
double sum = 0;
for (int i = 0; i < n; i++) {
sum += x[i] * y[i];
}
return(sum);
}
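// Kernel: each thread simulates one neuron of layer l over all t_steps, accumulating its
// integrated postsynaptic output into ALPHA, applying the refractory term, and recording a
// spike time (plus, for the output layer, the GAMMA/GAMMAd state snapshot) whenever the
// potential exceeds V_THRESH.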
__global__
void par_c_main_loop(double ***ALPHA, double ***Fcal, int **f_count, double ***Ws, int* net_shape, int n_layers,
int t_steps, double t_eps, int l, double ****GAMMA, double ****GAMMAd, const bool copy_gamma) {
double t;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int n = index; n < net_shape[l]; n += stride) {
t = 0;
for (int ti = 0; ti < t_steps; ti++) {
// Calculate total postsynaptic contribution
int n_f = f_count[l][n];
double psc = 0;
for (int tfi = 0; tfi < n_f; tfi++) {
double tf = Fcal[l][n][tfi];
psc += ipostkern(t - tf);
}
ALPHA[l][ti][n] = psc;
if (l > 0) {
// Update refractory contribution
n_f = f_count[l][n];
double refr = 0;
for (int tfi = 0; tfi < n_f; tfi++) {
double tf = Fcal[l][n][tfi];
refr += iprekern(t - tf);
}
// Update potential
double V_n = inner_prod(Ws[l-1][n], ALPHA[l-1][ti], net_shape[l-1]) + refr;
//printf("l = %d, n = %d, ti = %d", l, n, ti);
//printf("Vsl = %d, n = %d, ti = %d", l, n, ti);
// Check for firing neurons
if (V_n > V_THRESH) {
// If an output fire, record the neural state
if (copy_gamma && l == n_layers-1) {
for (int l1 = 0; l1 < n_layers; l1++) {
for (int h = 0; h < net_shape[l1]; h++) {
GAMMA[n][f_count[l][n]][l1][h] = 0;
GAMMAd[n][f_count[l][n]][l1][h] = 0;
for (int ti = 0; ti < f_count[l1][h]; ti++) {
double tf = Fcal[l1][h][ti];
GAMMA[n][f_count[l][n]][l1][h] += postkern(t + t_eps - tf);
GAMMAd[n][f_count[l][n]][l1][h] += dpostkern(t + t_eps - tf);
}
}
}
}
Fcal[l][n][f_count[l][n]] = t + t_eps;
f_count[l][n]++;
}
}
t += t_eps;
}
}
}
// The main simulation, using armadillo for matrix multiplication, and organized in such a way that we solve a sequence of embarrassingly parallelizable problems.
double **par_sim_body_c(int *net_shape, const int n_layers,
double **Fin, int *f_count_in, long long int **f_max, double ***Ws,
int** f_count, const int t_steps, const double t_eps, double ****GAMMA, double ****GAMMAd, const int debug, const bool copy_gamma) {
// Get the layer with the most neurons
int max_neur = 0;
for (int l = 0; l < n_layers; l++) {
if (max_neur < net_shape[l]) {
max_neur = net_shape[l];
}
}
// ALPHA stores integrated postsynaptic potential in column major order.
// OMEGA stores integrated refractory contribution in row major order.
//double ***ALPHA = (double ***)calloc(n_layers, sizeof(double**));
//double ***OMEGA = (double ***)calloc(n_layers-1, sizeof(double**));
double ***ALPHA, ***OMEGA;
cudaMallocManaged(&ALPHA, n_layers * sizeof(double**));
cudaMallocManaged(&OMEGA, (n_layers-1) * sizeof(double**));
for (int i = 0; i < n_layers; i++) {
double **ALPHAi;
cudaMallocManaged(&ALPHAi, t_steps * sizeof(double*));
ALPHA[i] = ALPHAi;
//ALPHA[i] = (double **) calloc(t_steps, sizeof(double*));
for (int j = 0; j < t_steps; j++) {
double *ALPHAij;
cudaMallocManaged(&ALPHAij, net_shape[i] * sizeof(double));
ALPHA[i][j] = ALPHAij;
//ALPHA[i][j] = (double *) calloc(net_shape[i], sizeof(double));
}
if (i > 0) {
double **OMEGAi;
cudaMallocManaged(&OMEGAi, net_shape[i] * sizeof(double*));
OMEGA[i-1] = OMEGAi;
//OMEGA[i-1] = (double **) calloc(net_shape[i], sizeof(double*));
for (int j = 0; j < net_shape[i]; j++) {
double *OMEGAij;
cudaMallocManaged(&OMEGAij, t_steps * sizeof(double));
OMEGA[i-1][j] = OMEGAij;
//OMEGA[i-1][j] = (double *) calloc(t_steps, sizeof(double));
}
}
}
if (debug >= 1)
printf("After ALPHA\n");
// Storage for firing times
//double ***u_Fcal = (double ***)calloc(n_layers, sizeof(double**));
double ***u_Fcal;
cudaMallocManaged(&u_Fcal, n_layers * sizeof(double**));
// Copy input spike times to unified memory.
double **u_Fin;
cudaMallocManaged(&u_Fin, net_shape[0] * sizeof(double*));
for (int n = 0; n < net_shape[0]; n++) {
double *u_Finn;
cudaMallocManaged(&u_Finn, f_count_in[n] * sizeof(double));
cudaMemcpy(u_Finn, Fin[n], f_count_in[n] * sizeof(double), cudaMemcpyDefault);
u_Fin[n] = u_Finn;
}
if (debug >= 1)
printf("After inputs \n");
//int **myarr = (int **)malloc(2*sizeof(int *));
//myarr[0] = (int **)malloc(2*sizeof(int));
//myarr[1] = (int **)malloc(2*sizeof(int));
//myarr[0][0] = 0;
//myarr[0][1] = 1;
//myarr[1][0] = 2;
//myarr[1][1] = 3;
//int **d_myarr;
//cudaMallocManaged(&d_myarr, 2*sizeof(int *));
//cudaMemcpy(d_myarr, myarr, 2*sizeof(int *), cudaMemcpyDefault);
int **u_f_count;
cudaMallocManaged(&u_f_count, n_layers * sizeof(int *));
int *u_f_count_in;
cudaMallocManaged(&u_f_count_in, net_shape[0] * sizeof(int));
cudaMemcpy(u_f_count_in, f_count_in, net_shape[0] * sizeof(int), cudaMemcpyDefault);
//f_count[0] = u_f_count_in;
cudaMemcpy(&u_f_count[0], &u_f_count_in, sizeof(int *), cudaMemcpyDefault);
u_Fcal[0] = u_Fin;
for (int l = 0; l < n_layers-1; l++) {
//double **Fi = (double **) calloc(net_shape[l+1], sizeof(double *));
double **Fi;
cudaMallocManaged(&Fi, net_shape[l+1] * sizeof(double *));
u_Fcal[l+1] = Fi;
//double **Fi = (double **) calloc(net_shape[l+1], sizeof(double *));
int *f_countl;
cudaMallocManaged(&f_countl, net_shape[l+1] * sizeof(int));
cudaMemcpy(&u_f_count[l+1], &f_countl, sizeof(int *), cudaMemcpyDefault);
for (int n = 0; n < net_shape[l+1]; n++) {
// Init fire counts at 0.
u_f_count[l+1][n] = 0;
double *Fln;
//printf("Number A\n");
//printf("%d\n", f_max[l+1][n]);
//printf("Number Z\n");
cudaMallocManaged(&Fln, f_max[l+1][n] * sizeof(double));
//printf("Number B\n");
Fi[n] = Fln;
//printf("Number C\n");
// Initialize storage to -1, so any negative firing time means the neuron did not fire.
for (int f = 0; f < f_max[l+1][n]; f++) {
Fi[n][f] = -1;
}
}
}
if (debug >= 1)
printf("After Fi copy\n");
//// Convert Connection weights to a C array
//// Ws[i] is the ith layer, Ws[i][j] is the jth row of layer i,
//// Ws[i][j][k] is the j,k element of layer i (row major ordering).
//double ***Ws_c = (double***)calloc(net_size-1, sizeof(double**));
//for (int l = 0; l < net_size-1; l++) {
// Ws_c[l] = (double**)calloc(net_shape[l], sizeof(double*));
// for (int n = 0; n < net_shape[l]; n++) {
// Ws_c[l][n] = Ws_in + wlo[l] + net_shape[l+1] * n;
// }
//}
// Do GAMMA(d)
// d_GAMMA[on][fi][l][h] gives the instantaneous postsynaptic current of neuron h of layer l at firing time fi of output neuron on.
double ****d_GAMMA, ****d_GAMMAd;
if (copy_gamma) {
// d_GAMMA/d_GAMMAd are indexed by the output neuron first, so allocate one slot per output neuron.
cudaMallocManaged(&d_GAMMA, net_shape[n_layers-1] * sizeof(double***));
cudaMallocManaged(&d_GAMMAd, net_shape[n_layers-1] * sizeof(double***));
for (int on = 0; on < net_shape[n_layers-1]; on++) {
cudaMallocManaged(&d_GAMMA[on], f_max[n_layers-1][on] * sizeof(double **));
cudaMallocManaged(&d_GAMMAd[on], f_max[n_layers-1][on] * sizeof(double **));
for (int fi = 0; fi < f_max[n_layers-1][on]; fi++) {
cudaMallocManaged(&d_GAMMA[on][fi], n_layers * sizeof(double*));
cudaMallocManaged(&d_GAMMAd[on][fi], n_layers * sizeof(double*));
for (int l = 0; l < n_layers; l++) {
cudaMallocManaged(&d_GAMMA[on][fi][l], net_shape[l] * sizeof(double));
cudaMallocManaged(&d_GAMMAd[on][fi][l], net_shape[l] * sizeof(double));
for (int h = 0; h < net_shape[l]; h++) {
d_GAMMA[on][fi][l][h] = -1;
d_GAMMAd[on][fi][l][h] = -1;
}
}
}
}
if (debug >= 1)
printf("Initted GAMMA storage \n");
}
// Copy weights to unified memory
double ***u_Ws;
cudaMallocManaged(&u_Ws, (n_layers-1) * sizeof(double**));
for (int l = 0; l < n_layers-1; l++) {
double **u_Wsl;
cudaMallocManaged(&u_Wsl, (net_shape[l+1]) * sizeof(double*));
u_Ws[l] = u_Wsl;
for (int n = 0; n < net_shape[l+1]; n++) {
double *u_Wsln;
cudaMallocManaged(&u_Wsln, net_shape[l] * sizeof(double));
cudaMemcpy(u_Wsln, Ws[l][n], net_shape[l] * sizeof(double), cudaMemcpyDefault);
u_Ws[l][n] = u_Wsln;
}
}
if (debug >= 1)
printf("After Weights copy\n");
// Copy network shape to unified memory
int *u_net_shape;
cudaMallocManaged(&u_net_shape, n_layers * sizeof(int));
cudaMemcpy(u_net_shape, net_shape, n_layers * sizeof(int), cudaMemcpyDefault);
// Run actual inference
//TODO: Should just be + 1
int n_blocks = max_neur / THREADS_PER_BLOCK + 1;
// Main Loop
for (int l = 0; l < n_layers; l++) {
if (debug >= 1)
printf(" Solving Layer %d...\n", l);
par_c_main_loop<<<n_blocks, THREADS_PER_BLOCK>>>(ALPHA, u_Fcal, u_f_count, u_Ws, u_net_shape, n_layers,
t_steps, t_eps, l, d_GAMMA, d_GAMMAd, copy_gamma);
cudaDeviceSynchronize();
}
if (debug >= 1)
printf("After main loop\n");
// Clean up
for (int i = 0; i < n_layers; i++) {
for (int j = 0; j < t_steps; j++) {
cudaFree(ALPHA[i][j]);
}
cudaFree(ALPHA[i]);
if (i > 0) {
for (int j = 0; j < net_shape[i]; j++) {
cudaFree(OMEGA[i-1][j]);
}
cudaFree(OMEGA[i-1]);
}
}
cudaFree(ALPHA);
cudaFree(OMEGA);
if (debug >= 1)
printf("After Free\n");
// Copy Fcal to host memory
//double ***Fcal = (double ***)malloc(n_layers * sizeof(double **));
//for (int l = 0; l < n_layers; l++) {
// Fcal[l] = (double **)malloc(net_shape[l] * sizeof(double *));
// for (int n = 0; n < net_shape[l]; n++) {
// Fcal[l][n] = (double *)malloc(f_max[l][n] * sizeof(double));
// cudaMemcpy(Fcal[l][n], u_Fcal[l][n], f_max[l][n] * sizeof(double), cudaMemcpyDefault);
// }
//}
// Copy output spikes to host memory
double **Fout = (double **)malloc(net_shape[n_layers-1]*sizeof(double*));
for (int n = 0; n < net_shape[n_layers-1]; n++) {
Fout[n] = (double *)malloc(f_max[n_layers-1][n] * sizeof(double));
cudaMemcpy(Fout[n], u_Fcal[n_layers-1][n], f_max[n_layers-1][n] * sizeof(double), cudaMemcpyDefault);
}
// Copy f_count to host memory
for (int l = 0; l < n_layers; l++) {
f_count[l] = (int *)calloc(net_shape[l], sizeof(int));
cudaMemcpy(f_count[l], u_f_count[l], net_shape[l] * sizeof(int), cudaMemcpyDefault);
}
if (debug >= 1)
printf("After output spike spike/f_count\n");
// Copy to host memory
// d_GAMMA[on][fi][l][h] gives the instantaneous postsynaptic current of neuron h of layer l at firing time fi of output neuron on.
//GAMMA = (double****)malloc((n_layers-1) * sizeof(double***));
//GAMMAd = (double****)malloc((n_layers-1) * sizeof(double***));
if (copy_gamma) {
for (int on = 0; on < net_shape[n_layers-1]; on++) {
GAMMA[on] = (double***)malloc(f_max[n_layers-1][on] * sizeof(double**));
GAMMAd[on] = (double***)malloc(f_max[n_layers-1][on] * sizeof(double**));
for (int fi = 0; fi < f_max[n_layers-1][on]; fi++) {
GAMMA[on][fi] = (double**)malloc(n_layers * sizeof(double*));
GAMMAd[on][fi] = (double**)malloc(n_layers * sizeof(double*));
for (int l = 0; l < n_layers; l++) {
GAMMA[on][fi][l] = (double*)malloc(net_shape[l] * sizeof(double));
GAMMAd[on][fi][l] = (double*)malloc(net_shape[l] * sizeof(double));
cudaMemcpy(GAMMA[on][fi][l], d_GAMMA[on][fi][l], net_shape[l] * sizeof(double), cudaMemcpyDefault);
cudaMemcpy(GAMMAd[on][fi][l], d_GAMMAd[on][fi][l], net_shape[l] * sizeof(double), cudaMemcpyDefault);
}
}
}
if (debug >= 1)
printf("After GAMMA copy\n");
}
//TODO: copy f_count
return(Fout);
}
|
8eb16e8223507327dcb520f8f1c476a4ece43895.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static SD_KERNEL void avgPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH,
const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX,
length, kHEff, kWEff;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
// Replace kernel H/W with *effective* kernel H/W accounting for dilation
kHEff = kH + (kH - 1) * (dH - 1);
kWEff = kW + (kW - 1) * (dW - 1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
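// Decode the flat output index into (batch n, channel c, output row ph, output col pw) for NCHW layout.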
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if (hstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH);
hstart += f * dH;
}
if (wstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW);
wstart += f * dW;
}
if (hend > iH) {
int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH);
hend -= f * dH;
}
if (wend > iW) {
int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW);
wend -= f * dW;
}
// Accounts for dilation
int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) *
sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW);
Z sum = 0.0f;
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH)
for (int w = wstart; w < wend; w += dW) sum += static_cast<Z>(inSlice[h * strideY + w * strideX]);
int divide_factor = pool_size; // Case 0: exclude padding
if (extraParam0 == 1) // Case 1: include padding
divide_factor = kH * kW;
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor);
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void avgPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo,
void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW,
const int sH, const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
hipLaunchKernelGGL(( avgPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW,
pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static SD_KERNEL void pnormPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH,
const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX,
length, kHEff, kWEff;
__shared__ bool fOrder;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
    // Replace kernel H/W with *effective* kernel H/W accounting for dilation
kHEff = kH + (kH - 1) * (dH - 1);
kWEff = kW + (kW - 1) * (dW - 1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if (hstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH);
hstart += f * dH;
}
if (wstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW);
wstart += f * dW;
}
if (hend > iH) {
int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH);
hend -= f * dH;
}
if (wend > iW) {
int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW);
wend -= f * dW;
}
// Accounts for dilation
int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) *
sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW);
Z sum = 0.f;
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH)
for (int w = wstart; w < wend; w += dW)
sum += sd::math::sd_pow<Z, Z, Z>(static_cast<Z>(sd::math::sd_abs<X>(inSlice[h * strideY + w * strideX])),
extraParam0);
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] =
sd::math::sd_pow<Z, Z, Z>(sum, (Z)1.0f / extraParam0);
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void pnormPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo,
void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW,
const int sH, const int sW, const int pH, const int pW, const int dH,
const int dW, const int extraParam0) {
hipLaunchKernelGGL(( pnormPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW,
pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static SD_KERNEL void maxPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH,
const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX,
length, kHEff, kWEff;
__shared__ bool fOrder;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
    // Replace kernel H/W with *effective* kernel H/W accounting for dilation
kHEff = kH + (kH - 1) * (dH - 1);
kWEff = kW + (kW - 1) * (dW - 1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if (hstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH);
hstart += f * dH;
}
if (wstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW);
wstart += f * dW;
}
if (hend > iH) {
int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH);
hend -= f * dH;
}
if (wend > iW) {
int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW);
wend -= f * dW;
}
// Accounts for dilation
int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) *
sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW);
Z max = -sd::DataTypeUtils::max<Z>();
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH) {
for (int w = wstart; w < wend; w += dW) {
Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]);
if (v > max) max = v;
}
}
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void maxPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo,
void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW,
const int sH, const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
hipLaunchKernelGGL(( maxPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW,
pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling2d(sd::graph::Context &block, const NDArray &input, NDArray &output, const int kH,
const int kW, const int sH, const int sW, const int pH, const int pW, const int dH,
const int dW, const PoolingType poolingMode, const int extraParam0) {
if (!input.isActualOnDeviceSide()) input.syncToDevice();
switch (poolingMode) {
case MAX_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(
input.dataType(), maxPooling2dCudaLauncher,
(*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(),
output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0),
SD_NUMERIC_TYPES);
} break;
case AVG_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(
input.dataType(), avgPooling2dCudaLauncher,
(*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(),
output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0),
SD_NUMERIC_TYPES);
} break;
case PNORM_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(
input.dataType(), pnormPooling2dCudaLauncher,
(*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(),
output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0),
SD_FLOAT_TYPES);
} break;
default:
throw std::runtime_error("Pooling2D: Unknown PoolingType used");
}
output.tickWriteDevice();
input.tickReadDevice();
auto result = hipStreamSynchronize(*block.launchContext()->getCudaStream());
if (result != 0) throw cuda_exception::build("Pooling2D failed", result);
}
} // namespace ops
} // namespace sd
|
8eb16e8223507327dcb520f8f1c476a4ece43895.cu
|
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static SD_KERNEL void avgPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH,
const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX,
length, kHEff, kWEff;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
    // Replace kernel H/W with *effective* kernel H/W accounting for dilation
kHEff = kH + (kH - 1) * (dH - 1);
kWEff = kW + (kW - 1) * (dW - 1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if (hstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH);
hstart += f * dH;
}
if (wstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW);
wstart += f * dW;
}
if (hend > iH) {
int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH);
hend -= f * dH;
}
if (wend > iW) {
int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW);
wend -= f * dW;
}
// Accounts for dilation
int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) *
sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW);
Z sum = 0.0f;
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH)
for (int w = wstart; w < wend; w += dW) sum += static_cast<Z>(inSlice[h * strideY + w * strideX]);
int divide_factor = pool_size; // Case 0: exclude padding
if (extraParam0 == 1) // Case 1: include padding
divide_factor = kH * kW;
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor);
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void avgPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo,
void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW,
const int sH, const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
avgPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW,
pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static SD_KERNEL void pnormPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH,
const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX,
length, kHEff, kWEff;
__shared__ bool fOrder;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
    // Replace kernel H/W with *effective* kernel H/W accounting for dilation
kHEff = kH + (kH - 1) * (dH - 1);
kWEff = kW + (kW - 1) * (dW - 1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if (hstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH);
hstart += f * dH;
}
if (wstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW);
wstart += f * dW;
}
if (hend > iH) {
int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH);
hend -= f * dH;
}
if (wend > iW) {
int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW);
wend -= f * dW;
}
// Accounts for dilation
int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) *
sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW);
Z sum = 0.f;
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH)
for (int w = wstart; w < wend; w += dW)
sum += sd::math::sd_pow<Z, Z, Z>(static_cast<Z>(sd::math::sd_abs<X>(inSlice[h * strideY + w * strideX])),
extraParam0);
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] =
sd::math::sd_pow<Z, Z, Z>(sum, (Z)1.0f / extraParam0);
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void pnormPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo,
void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW,
const int sH, const int sW, const int pH, const int pW, const int dH,
const int dW, const int extraParam0) {
pnormPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW,
pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static SD_KERNEL void maxPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH,
const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX,
length, kHEff, kWEff;
__shared__ bool fOrder;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
    // Replace kernel H/W with *effective* kernel H/W accounting for dilation
kHEff = kH + (kH - 1) * (dH - 1);
kWEff = kW + (kW - 1) * (dW - 1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if (hstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH);
hstart += f * dH;
}
if (wstart < 0) {
int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW);
wstart += f * dW;
}
if (hend > iH) {
int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH);
hend -= f * dH;
}
if (wend > iW) {
int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW);
wend -= f * dW;
}
// Accounts for dilation
int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) *
sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW);
Z max = -sd::DataTypeUtils::max<Z>();
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH) {
for (int w = wstart; w < wend; w += dW) {
Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]);
if (v > max) max = v;
}
}
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void maxPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo,
void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW,
const int sH, const int sW, const int pH, const int pW, const int dH, const int dW,
const int extraParam0) {
maxPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW,
pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling2d(sd::graph::Context &block, const NDArray &input, NDArray &output, const int kH,
const int kW, const int sH, const int sW, const int pH, const int pW, const int dH,
const int dW, const PoolingType poolingMode, const int extraParam0) {
if (!input.isActualOnDeviceSide()) input.syncToDevice();
switch (poolingMode) {
case MAX_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(
input.dataType(), maxPooling2dCudaLauncher,
(*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(),
output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0),
SD_NUMERIC_TYPES);
} break;
case AVG_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(
input.dataType(), avgPooling2dCudaLauncher,
(*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(),
output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0),
SD_NUMERIC_TYPES);
} break;
case PNORM_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(
input.dataType(), pnormPooling2dCudaLauncher,
(*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(),
output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0),
SD_FLOAT_TYPES);
} break;
default:
throw std::runtime_error("Pooling2D: Unknown PoolingType used");
}
output.tickWriteDevice();
input.tickReadDevice();
auto result = cudaStreamSynchronize(*block.launchContext()->getCudaStream());
if (result != 0) throw cuda_exception::build("Pooling2D failed", result);
}
} // namespace ops
} // namespace sd
|
137bc58872dfe8042bfb0465fbf7ed03f8d1d328.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "MDArrayHelper.h"
void printArr(int *arr, int size)
{
for (int i=0; i<size; i++) std::cout << arr[i] << " ";
std::cout << std::endl;
}
void testOnHost()
{
int orjArr[9];
int dim = 2;
int dimSize[] = {3, 3};
int linSize = 9;
MDArrayHelper<int> arr(orjArr, dim, dimSize);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
arr.set(0, index);
}
printArr(orjArr, linSize);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {0, i};
std::cout << arr.get(index) << " ";
}
std::cout << std::endl;
int index[] = {1, 0};
arr.reposition(index);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
std::cout << arr.get(index) << " ";
}
std::cout << std::endl;
}
__global__ void testKernel(int *out)
{
int dim = 2;
int dimSize[] = {3, 3};
MDArrayHelper<int> arr(out, dim, dimSize);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
arr.set(0, index);
}
int index[] = {1, 0};
arr.reposition(index);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
arr.set(1, index);
}
}
void testOnDevice()
{
int arr[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
int *d_arr;
hipMalloc(&d_arr, 9 * sizeof(int));
hipMemcpy(d_arr, arr, 9 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( testKernel) , dim3(1), dim3(1), 0, 0, d_arr);
hipMemcpy(arr, d_arr, 9 * sizeof(int), hipMemcpyDeviceToHost);
printArr(arr, 9);
}
int main()
{
testOnHost();
testOnDevice();
return 0;
}
/* Standard output:
1 2 3 0 0 0 7 8 9
1 2 3
7 8 9
1 2 3 0 0 0 1 1 1
*/
|
137bc58872dfe8042bfb0465fbf7ed03f8d1d328.cu
|
#include <iostream>
#include "MDArrayHelper.h"
void printArr(int *arr, int size)
{
for (int i=0; i<size; i++) std::cout << arr[i] << " ";
std::cout << std::endl;
}
void testOnHost()
{
int orjArr[9];
int dim = 2;
int dimSize[] = {3, 3};
int linSize = 9;
MDArrayHelper<int> arr(orjArr, dim, dimSize);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
arr.set(0, index);
}
printArr(orjArr, linSize);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {0, i};
std::cout << arr.get(index) << " ";
}
std::cout << std::endl;
int index[] = {1, 0};
arr.reposition(index);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
std::cout << arr.get(index) << " ";
}
std::cout << std::endl;
}
__global__ void testKernel(int *out)
{
int dim = 2;
int dimSize[] = {3, 3};
MDArrayHelper<int> arr(out, dim, dimSize);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
arr.set(0, index);
}
int index[] = {1, 0};
arr.reposition(index);
for (int i=0; i<dimSize[1]; i++)
{
int index[] = {1, i};
arr.set(1, index);
}
}
void testOnDevice()
{
int arr[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
int *d_arr;
cudaMalloc(&d_arr, 9 * sizeof(int));
cudaMemcpy(d_arr, arr, 9 * sizeof(int), cudaMemcpyHostToDevice);
testKernel <<<1, 1>>> (d_arr);
cudaMemcpy(arr, d_arr, 9 * sizeof(int), cudaMemcpyDeviceToHost);
printArr(arr, 9);
}
int main()
{
testOnHost();
testOnDevice();
return 0;
}
/* Standard output:
1 2 3 0 0 0 7 8 9
1 2 3
7 8 9
1 2 3 0 0 0 1 1 1
*/
|
00ede808de6aa1dd5193dfd4059a81981480170e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#define DEF_SHRD_VAL 0
#define DEF_DIM_X 1024
//!
//! This binning kernel does not prevent race conditions.
//!
__global__ void bin_kernel_simple(int * random_sequence, int * binned, int bin_width, int rseq_len)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if ( index < rseq_len) binned[random_sequence[index] / bin_width]++;
}
//!
//! Kernel that makes use of the atomic operator to eliminate race conditions.
//!
__global__ void bin_kernel_atomic(int * random_sequence, int * binned, int bin_width, int rseq_len)
{
///< TODO: Implement code that uses atomic operations to bin values.
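    // A minimal sketch of one possible solution (added note, kept as a comment so the
    // exercise stays open; launch shape assumed to match bin_kernel_simple):
    //   const int index = threadIdx.x + blockIdx.x * blockDim.x;
    //   if (index < rseq_len) atomicAdd(&binned[random_sequence[index] / bin_width], 1);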
}
///!
///! Write the necessary code to execute binning kernels.
///!
void execute_kernel(int * random_sequence_device, int * binned_device, int bin_width, int bins, int rseq_len, float & cuda_ms)
{
///< TODO: Implement the necessary code to start/stop the timers and execute the kernel.
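    // A minimal sketch of one possible implementation (added note; the grid/block sizes
    // and the choice of kernel are assumptions, not part of the original exercise):
    //   hipEvent_t start, stop;
    //   hipEventCreate(&start); hipEventCreate(&stop);
    //   hipEventRecord(start, 0);
    //   hipLaunchKernelGGL(bin_kernel_atomic, dim3((rseq_len + DEF_DIM_X - 1) / DEF_DIM_X),
    //                      dim3(DEF_DIM_X), DEF_SHRD_VAL, 0,
    //                      random_sequence_device, binned_device, bin_width, rseq_len);
    //   hipEventRecord(stop, 0); hipEventSynchronize(stop);
    //   hipEventElapsedTime(&cuda_ms, start, stop);
    //   hipEventDestroy(start); hipEventDestroy(stop);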
}
|
00ede808de6aa1dd5193dfd4059a81981480170e.cu
|
#include "kernels.h"
#define DEF_SHRD_VAL 0
#define DEF_DIM_X 1024
//!
//! This binning kernel does not prevent race conditions.
//!
__global__ void bin_kernel_simple(int * random_sequence, int * binned, int bin_width, int rseq_len)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if ( index < rseq_len) binned[random_sequence[index] / bin_width]++;
}
//!
//! Kernel that makes use of the atomic operator to eliminate race conditions.
//!
__global__ void bin_kernel_atomic(int * random_sequence, int * binned, int bin_width, int rseq_len)
{
///< TODO: Implement code that uses atomic operations to bin values.
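    // A minimal sketch of one possible solution (added note, kept as a comment so the
    // exercise stays open; launch shape assumed to match bin_kernel_simple):
    //   const int index = threadIdx.x + blockIdx.x * blockDim.x;
    //   if (index < rseq_len) atomicAdd(&binned[random_sequence[index] / bin_width], 1);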
}
///!
///! Write the necessary code to execute binning kernels.
///!
void execute_kernel(int * random_sequence_device, int * binned_device, int bin_width, int bins, int rseq_len, float & cuda_ms)
{
///< TODO: Implement the necessary code to start/stop the timers and execute the kernel.
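    // A minimal sketch of one possible implementation (added note; the grid/block sizes
    // and the choice of kernel are assumptions, not part of the original exercise):
    //   cudaEvent_t start, stop;
    //   cudaEventCreate(&start); cudaEventCreate(&stop);
    //   cudaEventRecord(start, 0);
    //   bin_kernel_atomic<<<(rseq_len + DEF_DIM_X - 1) / DEF_DIM_X, DEF_DIM_X, DEF_SHRD_VAL>>>(
    //       random_sequence_device, binned_device, bin_width, rseq_len);
    //   cudaEventRecord(stop, 0); cudaEventSynchronize(stop);
    //   cudaEventElapsedTime(&cuda_ms, start, stop);
    //   cudaEventDestroy(start); cudaEventDestroy(stop);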
}
|
106cf824299ae12add876623501c7cfb0d709563.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/pad3d_grad_kernel.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void Pad3DGradConstNCDHW(const int in_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(in_index, in_size) {
const int in_w = in_index % in_width;
int nc = in_index / in_width;
const int in_h = nc % in_height;
nc /= in_height;
const int in_d = nc % in_depth;
nc /= in_depth;
const int out_d = in_d + pad_front;
const int out_h = in_h + pad_top;
const int out_w = in_w + pad_left;
d_in_data[in_index] =
d_out_data[nc * out_depth * out_height * out_width +
out_d * out_height * out_width + out_h * out_width + out_w];
}
}
template <typename T>
__global__ void Pad3DGradConstNDHWC(const int in_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(in_index, in_size) {
const int c = in_index % channels;
int n = in_index / channels;
const int in_w = n % in_width;
n /= in_width;
const int in_h = n % in_height;
n /= in_height;
const int in_d = n % in_depth;
n /= in_depth;
const int out_d = in_d + pad_front;
const int out_h = in_h + pad_top;
const int out_w = in_w + pad_left;
d_in_data[in_index] =
d_out_data[n * out_depth * out_height * out_width * channels +
out_d * out_height * out_width * channels +
out_h * out_width * channels + out_w * channels + c];
}
}
template <typename T>
__global__ void Pad3DGradReflectNCDHW(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
int nc = out_index / out_width;
const int out_w = out_index % out_width;
const int out_h = nc % out_height;
nc /= out_height;
const int out_d = nc % out_depth;
nc /= out_depth;
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
in_d = max(in_d, -in_d);
in_h = max(in_h, -in_h);
in_w = max(in_w, -in_w);
in_d = min(in_d, 2 * in_depth - in_d - 2);
in_h = min(in_h, 2 * in_height - in_h - 2);
in_w = min(in_w, 2 * in_width - in_w - 2);
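    // Worked example (added note): with in_depth = 5, an out-of-range in_d = -2 reflects to
    // max(-2, 2) = 2, and in_d = 6 reflects to min(6, 2 * 5 - 6 - 2) = 2 (edge row not repeated).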
paddle::platform::CudaAtomicAdd(
&d_in_data[nc * in_depth * in_height * in_width +
in_d * in_height * in_width + in_h * in_width + in_w],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradReflectNDHWC(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
const int c = out_index % channels;
int n = out_index / channels;
const int out_w = n % out_width;
n /= out_width;
const int out_h = n % out_height;
n /= out_height;
const int out_d = n % out_depth;
n /= out_depth;
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
in_d = max(in_d, -in_d);
in_h = max(in_h, -in_h);
in_w = max(in_w, -in_w);
in_d = min(in_d, in_depth * 2 - in_d - 2);
in_h = min(in_h, in_height * 2 - in_h - 2);
in_w = min(in_w, in_width * 2 - in_w - 2);
paddle::platform::CudaAtomicAdd(
&d_in_data[n * in_depth * in_height * in_width * channels +
in_d * in_height * in_width * channels +
in_h * in_width * channels + in_w * channels + c],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradReplicateNCDHW(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
int nc = out_index / out_width;
const int out_w = out_index % out_width;
const int out_h = nc % out_height;
nc /= out_height;
const int out_d = nc % out_depth;
nc /= out_depth;
const int in_d = min(in_depth - 1, max(out_d - pad_front, 0));
const int in_h = min(in_height - 1, max(out_h - pad_top, 0));
const int in_w = min(in_width - 1, max(out_w - pad_left, 0));
paddle::platform::CudaAtomicAdd(
&d_in_data[nc * in_depth * in_height * in_width +
in_d * in_height * in_width + in_h * in_width + in_w],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradReplicateNDHWC(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
const int c = out_index % channels;
int n = out_index / channels;
const int out_w = n % out_width;
n /= out_width;
const int out_h = n % out_height;
n /= out_height;
const int out_d = n % out_depth;
n /= out_depth;
const int in_d = min(in_depth - 1, max(out_d - pad_front, 0));
const int in_h = min(in_height - 1, max(out_h - pad_top, 0));
const int in_w = min(in_width - 1, max(out_w - pad_left, 0));
paddle::platform::CudaAtomicAdd(
&d_in_data[n * in_depth * in_height * in_width * channels +
in_d * in_height * in_width * channels +
in_h * in_width * channels + in_w * channels + c],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradCircularNCDHW(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
int nc = out_index / out_width;
const int out_w = out_index % out_width;
const int out_h = nc % out_height;
nc /= out_height;
const int out_d = nc % out_depth;
nc /= out_depth;
int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
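    // Note (added): the double modulo also maps negative offsets into [0, in_*), e.g.
    // ((-1) % 5 + 5) % 5 = 4, which is the wrap-around that circular padding requires.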
paddle::platform::CudaAtomicAdd(
&d_in_data[nc * in_depth * in_height * in_width +
in_d * in_height * in_width + in_h * in_width + in_w],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradCircularNDHWC(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
const int c = out_index % channels;
int n = out_index / channels;
const int out_w = n % out_width;
n /= out_width;
const int out_h = n % out_height;
n /= out_height;
const int out_d = n % out_depth;
n /= out_depth;
int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
paddle::platform::CudaAtomicAdd(
&d_in_data[n * in_depth * in_height * in_width * channels +
in_d * in_height * in_width * channels +
in_h * in_width * channels + in_w * channels + c],
d_out_data[out_index]);
}
}
template <typename T, typename Context>
void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* x_grad) {
std::vector<int64_t> pads = paddings.GetData();
auto* d_out = &out_grad;
auto* d_in = x_grad;
auto d_in_dims = d_in->dims();
auto d_out_dims = d_out->dims();
const T* d_out_data = d_out->data<T>();
T* d_in_data = dev_ctx.template Alloc<T>(d_in);
phi::funcs::SetConstant<Context, T>()(dev_ctx, d_in, static_cast<T>(0));
const int pad_left = pads[0];
const int pad_top = pads[2];
const int pad_front = pads[4];
const int num = d_in_dims[0];
auto stream = dev_ctx.stream();
int block = PADDLE_CUDA_NUM_THREADS;
const int out_size = d_out->numel();
const int in_size = d_in->numel();
int grid = (out_size + block - 1) / block;
if (data_format == "NCDHW") {
const int channels = d_in_dims[1];
const int in_depth = d_in_dims[2];
const int in_height = d_in_dims[3];
const int in_width = d_in_dims[4];
const int out_depth = d_out_dims[2];
const int out_height = d_out_dims[3];
const int out_width = d_out_dims[4];
if (mode == "reflect") {
hipLaunchKernelGGL(( Pad3DGradReflectNCDHW<T>), dim3(grid), dim3(block), 0, stream, out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "replicate") {
hipLaunchKernelGGL(( Pad3DGradReplicateNCDHW<T>), dim3(grid), dim3(block), 0, stream, out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "circular") {
hipLaunchKernelGGL(( Pad3DGradCircularNCDHW<T>), dim3(grid), dim3(block), 0, stream, out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else {
grid = (in_size + block - 1) / block;
hipLaunchKernelGGL(( Pad3DGradConstNCDHW<T>), dim3(grid), dim3(block), 0, stream, in_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
}
} else {
const int channels = d_in_dims[4];
const int in_depth = d_in_dims[1];
const int in_height = d_in_dims[2];
const int in_width = d_in_dims[3];
const int out_depth = d_out_dims[1];
const int out_height = d_out_dims[2];
const int out_width = d_out_dims[3];
if (mode == "reflect") {
hipLaunchKernelGGL(( Pad3DGradReflectNDHWC<T>), dim3(grid), dim3(block), 0, stream, out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "replicate") {
hipLaunchKernelGGL(( Pad3DGradReplicateNDHWC<T>), dim3(grid), dim3(block), 0, stream, out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "circular") {
hipLaunchKernelGGL(( Pad3DGradCircularNDHWC<T>), dim3(grid), dim3(block), 0, stream, out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else {
grid = (in_size + block - 1) / block;
hipLaunchKernelGGL(( Pad3DGradConstNDHWC<T>), dim3(grid), dim3(block), 0, stream, in_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(
pad3d_grad, GPU, ALL_LAYOUT, phi::Pad3dGradKernel, float, double) {}
|
106cf824299ae12add876623501c7cfb0d709563.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/pad3d_grad_kernel.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void Pad3DGradConstNCDHW(const int in_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(in_index, in_size) {
const int in_w = in_index % in_width;
int nc = in_index / in_width;
const int in_h = nc % in_height;
nc /= in_height;
const int in_d = nc % in_depth;
nc /= in_depth;
const int out_d = in_d + pad_front;
const int out_h = in_h + pad_top;
const int out_w = in_w + pad_left;
d_in_data[in_index] =
d_out_data[nc * out_depth * out_height * out_width +
out_d * out_height * out_width + out_h * out_width + out_w];
}
}
template <typename T>
__global__ void Pad3DGradConstNDHWC(const int in_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(in_index, in_size) {
const int c = in_index % channels;
int n = in_index / channels;
const int in_w = n % in_width;
n /= in_width;
const int in_h = n % in_height;
n /= in_height;
const int in_d = n % in_depth;
n /= in_depth;
const int out_d = in_d + pad_front;
const int out_h = in_h + pad_top;
const int out_w = in_w + pad_left;
d_in_data[in_index] =
d_out_data[n * out_depth * out_height * out_width * channels +
out_d * out_height * out_width * channels +
out_h * out_width * channels + out_w * channels + c];
}
}
template <typename T>
__global__ void Pad3DGradReflectNCDHW(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
int nc = out_index / out_width;
const int out_w = out_index % out_width;
const int out_h = nc % out_height;
nc /= out_height;
const int out_d = nc % out_depth;
nc /= out_depth;
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
in_d = max(in_d, -in_d);
in_h = max(in_h, -in_h);
in_w = max(in_w, -in_w);
in_d = min(in_d, 2 * in_depth - in_d - 2);
in_h = min(in_h, 2 * in_height - in_h - 2);
in_w = min(in_w, 2 * in_width - in_w - 2);
paddle::platform::CudaAtomicAdd(
&d_in_data[nc * in_depth * in_height * in_width +
in_d * in_height * in_width + in_h * in_width + in_w],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradReflectNDHWC(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
const int c = out_index % channels;
int n = out_index / channels;
const int out_w = n % out_width;
n /= out_width;
const int out_h = n % out_height;
n /= out_height;
const int out_d = n % out_depth;
n /= out_depth;
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
in_d = max(in_d, -in_d);
in_h = max(in_h, -in_h);
in_w = max(in_w, -in_w);
in_d = min(in_d, in_depth * 2 - in_d - 2);
in_h = min(in_h, in_height * 2 - in_h - 2);
in_w = min(in_w, in_width * 2 - in_w - 2);
paddle::platform::CudaAtomicAdd(
&d_in_data[n * in_depth * in_height * in_width * channels +
in_d * in_height * in_width * channels +
in_h * in_width * channels + in_w * channels + c],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradReplicateNCDHW(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
int nc = out_index / out_width;
const int out_w = out_index % out_width;
const int out_h = nc % out_height;
nc /= out_height;
const int out_d = nc % out_depth;
nc /= out_depth;
const int in_d = min(in_depth - 1, max(out_d - pad_front, 0));
const int in_h = min(in_height - 1, max(out_h - pad_top, 0));
const int in_w = min(in_width - 1, max(out_w - pad_left, 0));
paddle::platform::CudaAtomicAdd(
&d_in_data[nc * in_depth * in_height * in_width +
in_d * in_height * in_width + in_h * in_width + in_w],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradReplicateNDHWC(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
const int c = out_index % channels;
int n = out_index / channels;
const int out_w = n % out_width;
n /= out_width;
const int out_h = n % out_height;
n /= out_height;
const int out_d = n % out_depth;
n /= out_depth;
const int in_d = min(in_depth - 1, max(out_d - pad_front, 0));
const int in_h = min(in_height - 1, max(out_h - pad_top, 0));
const int in_w = min(in_width - 1, max(out_w - pad_left, 0));
paddle::platform::CudaAtomicAdd(
&d_in_data[n * in_depth * in_height * in_width * channels +
in_d * in_height * in_width * channels +
in_h * in_width * channels + in_w * channels + c],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradCircularNCDHW(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
int nc = out_index / out_width;
const int out_w = out_index % out_width;
const int out_h = nc % out_height;
nc /= out_height;
const int out_d = nc % out_depth;
nc /= out_depth;
int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
paddle::platform::CudaAtomicAdd(
&d_in_data[nc * in_depth * in_height * in_width +
in_d * in_height * in_width + in_h * in_width + in_w],
d_out_data[out_index]);
}
}
template <typename T>
__global__ void Pad3DGradCircularNDHWC(const int out_size,
T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data) {
CUDA_KERNEL_LOOP(out_index, out_size) {
const int c = out_index % channels;
int n = out_index / channels;
const int out_w = n % out_width;
n /= out_width;
const int out_h = n % out_height;
n /= out_height;
const int out_d = n % out_depth;
n /= out_depth;
int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
paddle::platform::CudaAtomicAdd(
&d_in_data[n * in_depth * in_height * in_width * channels +
in_d * in_height * in_width * channels +
in_h * in_width * channels + in_w * channels + c],
d_out_data[out_index]);
}
}
template <typename T, typename Context>
void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* x_grad) {
std::vector<int64_t> pads = paddings.GetData();
auto* d_out = &out_grad;
auto* d_in = x_grad;
auto d_in_dims = d_in->dims();
auto d_out_dims = d_out->dims();
const T* d_out_data = d_out->data<T>();
T* d_in_data = dev_ctx.template Alloc<T>(d_in);
phi::funcs::SetConstant<Context, T>()(dev_ctx, d_in, static_cast<T>(0));
const int pad_left = pads[0];
const int pad_top = pads[2];
const int pad_front = pads[4];
const int num = d_in_dims[0];
auto stream = dev_ctx.stream();
int block = PADDLE_CUDA_NUM_THREADS;
const int out_size = d_out->numel();
const int in_size = d_in->numel();
int grid = (out_size + block - 1) / block;
if (data_format == "NCDHW") {
const int channels = d_in_dims[1];
const int in_depth = d_in_dims[2];
const int in_height = d_in_dims[3];
const int in_width = d_in_dims[4];
const int out_depth = d_out_dims[2];
const int out_height = d_out_dims[3];
const int out_width = d_out_dims[4];
if (mode == "reflect") {
Pad3DGradReflectNCDHW<T><<<grid, block, 0, stream>>>(out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "replicate") {
Pad3DGradReplicateNCDHW<T><<<grid, block, 0, stream>>>(out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "circular") {
Pad3DGradCircularNCDHW<T><<<grid, block, 0, stream>>>(out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else {
grid = (in_size + block - 1) / block;
Pad3DGradConstNCDHW<T><<<grid, block, 0, stream>>>(in_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
}
} else {
const int channels = d_in_dims[4];
const int in_depth = d_in_dims[1];
const int in_height = d_in_dims[2];
const int in_width = d_in_dims[3];
const int out_depth = d_out_dims[1];
const int out_height = d_out_dims[2];
const int out_width = d_out_dims[3];
if (mode == "reflect") {
Pad3DGradReflectNDHWC<T><<<grid, block, 0, stream>>>(out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "replicate") {
Pad3DGradReplicateNDHWC<T><<<grid, block, 0, stream>>>(out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else if (mode == "circular") {
Pad3DGradCircularNDHWC<T><<<grid, block, 0, stream>>>(out_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
} else {
grid = (in_size + block - 1) / block;
Pad3DGradConstNDHWC<T><<<grid, block, 0, stream>>>(in_size,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(
pad3d_grad, GPU, ALL_LAYOUT, phi::Pad3dGradKernel, float, double) {}
|
a8fac966a47514d8c422c6abac70dce0a09bda90.hip
|
// !!! This is a file automatically generated by hipify!!!
///This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm
///(03/08/2016)
///This version is used to measure the execution times for different numbers of samples and iterations
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
//////////////////////////////////////////////////////////////////////////
///////////////////////FUNCTION DECLARATIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(const long N,const int Li);
void arreglo_W(const long N);
void asign_rap(const long N,const int Li,const int Lo);
void factor(const long N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(const long N, const int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(const long N,const int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////GLOBAL VARIABLE DECLARATIONS/////////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
cuFloatComplex *y_host;
cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
int Dip,Dop,P;
int vF[50]; //Stores the factors of N
int svF=0; //Stores the number of factors of N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA//////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements of the input vector
/// Li >>> Number of nonzero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
/////////////////// THE INPUT DATA IS ENTERED HERE ///////////////////////
//////////////////////////////////////////////////////////////////////////
///Enter the number of elements of the input vector x(n)
const long N = 2;
///Enter the number of nonzero input elements (Li)
const int Li = 1;
///Enter the number of required output elements (Lo)
const int Lo = 2;
///Enter the number of required iterations
const int loop = 1;
///Enter the number of required samples
const int muestras = 1;
//////////////////////////////////////////////////////////////////////////
//////////////////////////MAIN FUNCTION///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Main function
int main()
{
int i,j;
float suma;
float promedio[muestras];
///Create the binary files where the data will be saved
//FILE *da;
//da = fopen("FFT_DIF_DIT_TD_VERSION_PARALELIZADA.bin","a+b"); //Creates or overwrites the file
//Pause
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i=1;i<=muestras;i++)
{
suma=0.0;
for(j=0;j<loop;j++)
{
//Commands needed to measure time
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//---------------------------------------------------------------------------------------------
//Start measuring the application execution time
hipEventRecord(start_app,0);
//Generate the values of the input vector x[n] on the host
vector_entrada_xn(N,Li);
//Generate the values of the array W[N] on the host
arreglo_W(N);
//Generate the Dip and Dop factors on the host
asign_rap(N,Li,Lo);
//Compute the factor P on the host
P = N/(Dip*Dop);
printf("\n\n FACTOR P:\n\n");
printf(" %d ",P);
//Host helper function to run the input stage
etapa_entrada();
//Host helper function to run the intermediate stage
etapa_intermedia();
//Host helper function to run the output stage
etapa_salida();
/////////////////////////////////////////////////////////////////////////////
//printf("\n size_cuFloatComplex = %d\n",sizeof(cuFloatComplex));
//printf("\n size_cufftComplex = %d\n",sizeof(hipfftComplex));
/////////////////////////////////////////////////////////////////////////////
//Free the host and device memory
free(x_host);
free(W_host);
free(y_host);
free(z_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
hipFree(in);
hipFree(out);
//---------------------------------------------------------------------------------------------
//Commands needed to measure the application (app) time
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Sum of all the times
suma = suma + elapsedTime_app;
//Destroy the events that measure the application time
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
}
promedio[i-1] = suma/(float)loop;
printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[i-1]);
}
//fwrite(promedio,sizeof(float),muestras,da);
//fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////SECONDARY FUNCTIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//This function generates the input vector x[n]
void vector_entrada_xn(const long N,const int Li)
{
//Local variable declarations
int k;
//Allocate memory for x_host on the host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Assign values to x[n]
for(k=0;k<N;k++)
{
if(k < Li)
{
//x[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
}
else
{
x_host[k] = make_cuFloatComplex((float)(0.0),(float)(0.0));
}
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<N;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
}
//This function generates the array W of twiddle factors
void arreglo_W(const long N)
{
//Local variable declarations
int n;
//Allocate memory for W_host on the host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Generate the array W, W[n-1] = cos(2*pi*n/N) - j*sin(2*pi*n/N)
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//This function generates the Dip and Dop factors
void asign_rap(const long N,const int Li,const int Lo)
{
//Local variable declarations
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
}
//Ésta función encuentra los factores de "N"
void factor(const long N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
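//Worked example (assuming N = 12): factor(12) gives vF = {2,2,3}, so
//Nh = {2,3} and k = {2,1}; product(Nh,k,1) then fills Prod = {1,2,4,3,6,12}
//and sets a = 6. Prod therefore enumerates every divisor of N, and asign_rap()
//picks Dip and Dop from these divisors.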
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device
hipMalloc((void**)&x_device,N*sizeof(cuFloatComplex));
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "y"
y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Envío de los arreglos x y W hacia la memoria global del device
hipMemcpy(x_device,x_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(const long N, const int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
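//Layout note: y is a Dip x Dop x P array stored flat as
//y[k1*Dop*P + n1*P + n2]; each thread decomposes its linear index n into
//n2 = floor(n/Dop) and n1 = n - Dop*n2, so y(n1,n2,k1) collects the Li
//non-zero input samples (scaled by the twiddles W^(n*k1)) and zero-pads the
//remaining positions before the batched length-P FFTs of the next stage.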
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
//Ejecución del plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Se destruye el plan
hipfftDestroy(plan);
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
}
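//The batched plan above runs Dip*Dop independent 1-D complex-to-complex FFTs
//of length P: istride/ostride = 1 and idist/odist = P, so transform b
//(b = 0..Dip*Dop-1) reads and writes the contiguous block of P elements
//starting at offset b*P.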
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
}
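//Output stage overview: each thread owns one output bin k. Inside every window
//of Dip*P consecutive bins, k is decomposed as k = k1 + Dip*k2 with
//k1 = 0..Dip-1 and k2 = 0..P-1, and X[k] is rebuilt from the length-P FFT
//results z(n1,k2,k1) either by a direct sum over n1 (when Dop <= 4) or by a
//second-order (Goertzel-like) recursion driven by the real coefficient
//2*Re(W), as implemented in outputStage_kernel below.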
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(const long N,const int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
}
}
}
}
}
|
a8fac966a47514d8c422c6abac70dce0a09bda90.cu
|
///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(03/08/2016)
///Ésta versión sirve para calcular los tiempos de ejecución para distintos números de muestras e iteraciones
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(const long N,const int Li);
void arreglo_W(const long N);
void asign_rap(const long N,const int Li,const int Lo);
void factor(const long N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(const long N, const int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(const long N,const int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
cuFloatComplex *y_host;
cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
int Dip,Dop,P;
int vF[50]; //Almacena los factores de N
int svF=0; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de elementos del vector de entrada x(n)
const long N = 2;
///Ingrese el número de elementos de entrada diferentes de cero (Li)
const int Li = 1;
///Ingrese el número de elementos de salida requeridos (Lo)
const int Lo = 2;
///Ingrese el número de iteraciones requeridas
const int loop = 1;
///Ingrese el número de muestras requeridas
const int muestras = 1;
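//Illustrative configuration (an example, not a requirement): N = 8, Li = 4,
//Lo = 8 computes one sample of one iteration of an 8-point transform in which
//only the first 4 input samples are non-zero and all 8 output bins are
//produced. Dip and Dop are taken from complementary divisors of N, so
//P = N/(Dip*Dop) is intended to be an integer.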
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
int i,j;
float suma;
float promedio[muestras];
///Se crean los archivos binarios donde se guardarán los datos
//FILE *da;
//da = fopen("FFT_DIF_DIT_TD_VERSION_PARALELIZADA.bin","a+b"); //Crea o sobre escribe archivo
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i=1;i<=muestras;i++)
{
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(N,Li);
//Se generan en el host los valores del arreglo W[N]
arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
printf("\n\n FACTOR P:\n\n");
printf(" %d ",P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
/////////////////////////////////////////////////////////////////////////////
//printf("\n size_cuFloatComplex = %d\n",sizeof(cuFloatComplex));
//printf("\n size_cufftComplex = %d\n",sizeof(cufftComplex));
/////////////////////////////////////////////////////////////////////////////
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(y_host);
free(z_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
cudaFree(in);
cudaFree(out);
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
}
promedio[i-1] = suma/(float)loop;
printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[i-1]);
}
//fwrite(promedio,sizeof(float),muestras,da);
//fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(const long N,const int Li)
{
//Declaración de variables locales
int k;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se dan valores a x[n]
for(k=0;k<N;k++)
{
if(k < Li)
{
//x[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
}
else
{
x_host[k] = make_cuFloatComplex((float)(0.0),(float)(0.0));
}
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<N;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
}
//Ésta función genera el arreglo W
void arreglo_W(const long N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(const long N,const int Li,const int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
}
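//Selection rule: among the divisor pairs of N, asign_rap() keeps the
//(Dip, Dop) with the smallest Euclidean distance to the ideal point
//(N/Li, N/Lo), subject to Dip <= N/Li, so the decomposition is matched to the
//number of non-zero inputs (Li) and of requested outputs (Lo).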
//Ésta función encuentra los factores de "N"
void factor(const long N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device
cudaMalloc((void**)&x_device,N*sizeof(cuFloatComplex));
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "y"
y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Envío de los arreglos x y W hacia la memoria global del device
cudaMemcpy(x_device,x_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(const long N, const int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
//Ejecución del plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Se destruye el plan
cufftDestroy(plan);
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(const long N,const int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
}
}
}
}
}
|
dd8427560811a3e543b7ebf6243d8acba633a1ee.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>
#include <vector>
#include <map>
#include <type_traits>
#include <memory>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include <cudf.h>
#include <cudf/functions.h>
#include <dataframe/cudf_table.cuh>
#include <hash/hash_functions.cuh>
#include <utilities/int_fastdiv.h>
#include <rmm/thrust_rmm_allocator.h>
#include "tests/utilities/cudf_test_utils.cuh"
#include "tests/utilities/cudf_test_fixtures.h"
// Vector set to use rmmAlloc and rmmFree.
template <typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
template <template <typename> class hash_function,
typename size_type>
struct row_partition_mapper
{
__device__
row_partition_mapper(gdf_table<size_type> const & table_to_hash, const size_type _num_partitions)
: the_table{table_to_hash}, num_partitions{_num_partitions}
{}
__device__
hash_value_type operator()(size_type row_index) const
{
return the_table.template hash_row<hash_function>(row_index) % num_partitions;
}
gdf_table<size_type> const & the_table;
// Using int_fastdiv can return results different from using the normal modulus
// operation, therefore we need to use it in result verification as well
size_type num_partitions;
};
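// Minimal usage sketch (mirroring verify_gdf_result below; names here are
// placeholders):
//
// Vector<int> parts(table.get_column_length());
// thrust::tabulate(thrust::device, parts.begin(), parts.end(),
// row_partition_mapper<MurmurHash3_32, int>(table, num_partitions));
//
// Every row that gdf_hash_partition placed in partition p must map back to p.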
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct HashPartitionTest : public GdfTest
{
constexpr static gdf_hash_func gdf_hash_function = test_parameters::gdf_hash_function;
const int num_cols_to_hash = test_parameters::num_cols_to_hash;
std::array<int, test_parameters::num_cols_to_hash> cols_to_hash = test_parameters::cols_to_hash;
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns, and the value_type of each
// vector determines the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
multi_column_t input_columns;
multi_column_t output_columns;
// Containers for unique_ptrs to gdf_columns
// unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_input_columns;
std::vector<gdf_col_pointer> gdf_output_columns;
// Containers for the raw pointers to the gdf_columns
std::vector<gdf_column*> raw_gdf_input_columns;
std::vector<gdf_column*> raw_gdf_output_columns;
HashPartitionTest()
{
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~HashPartitionTest()
{
}
void create_input( size_t num_rows, size_t max_value,
bool print = false)
{
initialize_tuple(input_columns, num_rows, max_value);
initialize_tuple(output_columns, num_rows, max_value);
gdf_input_columns = initialize_gdf_columns(input_columns);
gdf_output_columns = initialize_gdf_columns(output_columns);
// Fill vector of raw pointers to gdf_columns
for(auto const& c : gdf_input_columns){
this->raw_gdf_input_columns.push_back(c.get());
}
for(auto const& c : gdf_output_columns){
this->raw_gdf_output_columns.push_back(c.get());
}
if(print)
{
std::cout << "Input column(s) created. Size: "
<< std::get<0>(input_columns).size() << std::endl;
print_tuple(input_columns);
}
}
std::vector<int> compute_gdf_result(const int num_partitions, bool print = false)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_error result_error{GDF_SUCCESS};
gdf_column ** gdf_input_columns = raw_gdf_input_columns.data();
gdf_column ** gdf_output_columns = raw_gdf_output_columns.data();
std::vector<int> partition_offsets(num_partitions,0);
result_error = gdf_hash_partition(num_columns,
gdf_input_columns,
this->cols_to_hash.data(),
this->num_cols_to_hash,
num_partitions,
gdf_output_columns,
partition_offsets.data(),
gdf_hash_function);
EXPECT_EQ(GDF_SUCCESS, result_error);
if(print)
{
std::cout << "Partition offsets: ";
for(int i = 0; i < num_partitions; ++i)
{
std::cout << partition_offsets[i] << " ";
}
std::cout << std::endl;
}
return partition_offsets;
}
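// partition_offsets[i] is expected to hold the row index at which partition i
// begins in the reordered output columns, so partition i spans
// [partition_offsets[i], partition_offsets[i+1]) and the last partition runs
// to the end of the table; verify_gdf_result below checks exactly this.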
void verify_gdf_result(int num_partitions, std::vector<int> partition_offsets, bool print = false)
{
std::vector<gdf_column*> gdf_cols_to_hash;
for(int i = 0; i < num_cols_to_hash; ++i)
{
gdf_cols_to_hash.push_back(raw_gdf_output_columns[cols_to_hash[i]]);
}
// Create a table from the gdf output of only the columns that were hashed
std::unique_ptr< gdf_table<int> > table_to_hash{new gdf_table<int>(num_cols_to_hash, gdf_cols_to_hash.data())};
Vector<int> row_partition_numbers(table_to_hash->get_column_length());
// Compute the partition number for every row in the result
switch(gdf_hash_function)
{
case GDF_HASH_MURMUR3:
{
thrust::tabulate(thrust::device,
row_partition_numbers.begin(),
row_partition_numbers.end(),
row_partition_mapper<MurmurHash3_32,int>(*table_to_hash,num_partitions));
break;
}
case GDF_HASH_IDENTITY:
{
thrust::tabulate(thrust::device,
row_partition_numbers.begin(),
row_partition_numbers.end(),
row_partition_mapper<IdentityHash,int>(*table_to_hash,num_partitions));
break;
}
default:
std::cerr << "Invalid GDF hash function.\n";
}
std::vector<int> host_row_partition_numbers(table_to_hash->get_column_length());
hipMemcpy(host_row_partition_numbers.data(),
row_partition_numbers.data().get(),
table_to_hash->get_column_length() * sizeof(int),
hipMemcpyDeviceToHost);
if(print)
{
std::cout << "Row partition numbers:\n";
std::copy(host_row_partition_numbers.begin(),
host_row_partition_numbers.end(),
std::ostream_iterator<int>(std::cout, ", "));
std::cout << std::endl;
}
// Check that the partition number for every row is correct
for(int partition_number = 0; partition_number < num_partitions; ++partition_number)
{
const int partition_start = partition_offsets[partition_number];
int partition_stop{0};
if(partition_number < (num_partitions - 1))
{
partition_stop = partition_offsets[partition_number + 1];
}
// The end of the last partition is the end of the table
else
{
partition_stop = table_to_hash->get_column_length();
}
// Everything in the current partition should have the same partition
// number
for(int i = partition_start; i < partition_stop; ++i)
{
EXPECT_EQ(partition_number, host_row_partition_numbers[i]) << "Partition number for row: " << i << " doesn't match!";
}
}
}
};
template< typename tuple_of_vectors,
gdf_hash_func hash,
int... cols>
struct TestParameters
{
static_assert((std::tuple_size<tuple_of_vectors>::value >= sizeof...(cols)),
"The number of columns to hash must be less than or equal to the total number of columns.");
// The tuple of vectors that determines the number and types of the columns
using multi_column_t = tuple_of_vectors;
// The hash function to use
constexpr static const gdf_hash_func gdf_hash_function = hash;
// The number of columns to hash
constexpr static const int num_cols_to_hash{sizeof...(cols)};
// The indices of the columns that will be hashed to determine the partitions
constexpr static const std::array<int, sizeof...(cols)> cols_to_hash{{cols...}};
};
// Using Google Tests "Type Parameterized Tests"
// Every test defined as TYPED_TEST(HashPartitionTest, *) will be run once for every instance of
// TestParameters defined below
// The number and types of columns are determined by the number and types of vectors
// in the VTuple<...>
// The hash function to be used is determined by the gdf_hash_func enum
// The columns to be hashed to determine the partition assignment are the last N integer template
// arguments, where N <= the number of columns specified in the VTuple
typedef ::testing::Types< TestParameters< VTuple<int32_t>, GDF_HASH_IDENTITY, 0 >,
TestParameters< VTuple<int32_t, int32_t>, GDF_HASH_MURMUR3, 0, 1>,
TestParameters< VTuple<float, double>, GDF_HASH_MURMUR3, 1>,
TestParameters< VTuple<int64_t, int32_t>, GDF_HASH_MURMUR3, 1>,
TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>,
TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_IDENTITY, 2, 3>,
TestParameters< VTuple<uint32_t, double, int32_t, double>, GDF_HASH_MURMUR3, 0, 2, 3>,
TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_MURMUR3, 1, 3>,
TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>,
TestParameters< VTuple<float, int32_t>, GDF_HASH_MURMUR3, 0>
>Implementations;
TYPED_TEST_CASE(HashPartitionTest, Implementations);
TYPED_TEST(HashPartitionTest, ExampleTest)
{
const int num_partitions = 5;
this->create_input(100, 100);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, OnePartition)
{
const int num_partitions = 1;
this->create_input(100000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, TenPartitions)
{
const int num_partitions = 10;
this->create_input(1000000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, EightPartitions)
{
const int num_partitions = 8;
this->create_input(1000000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, 257Partitions)
{
const int num_partitions = 257;
this->create_input(1000000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
|
dd8427560811a3e543b7ebf6243d8acba633a1ee.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>
#include <vector>
#include <map>
#include <type_traits>
#include <memory>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include <cudf.h>
#include <cudf/functions.h>
#include <dataframe/cudf_table.cuh>
#include <hash/hash_functions.cuh>
#include <utilities/int_fastdiv.h>
#include <rmm/thrust_rmm_allocator.h>
#include "tests/utilities/cudf_test_utils.cuh"
#include "tests/utilities/cudf_test_fixtures.h"
// Vector set to use rmmAlloc and rmmFree.
template <typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
template <template <typename> class hash_function,
typename size_type>
struct row_partition_mapper
{
__device__
row_partition_mapper(gdf_table<size_type> const & table_to_hash, const size_type _num_partitions)
: the_table{table_to_hash}, num_partitions{_num_partitions}
{}
__device__
hash_value_type operator()(size_type row_index) const
{
return the_table.template hash_row<hash_function>(row_index) % num_partitions;
}
gdf_table<size_type> const & the_table;
// Using int_fastdiv can return results different from using the normal modulus
// operation, therefore we need to use it in result verification as well
size_type num_partitions;
};
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct HashPartitionTest : public GdfTest
{
constexpr static gdf_hash_func gdf_hash_function = test_parameters::gdf_hash_function;
const int num_cols_to_hash = test_parameters::num_cols_to_hash;
std::array<int, test_parameters::num_cols_to_hash> cols_to_hash = test_parameters::cols_to_hash;
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns, and the value_type of each
// vector determines the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
multi_column_t input_columns;
multi_column_t output_columns;
// Containers for unique_ptrs to gdf_columns
// unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_input_columns;
std::vector<gdf_col_pointer> gdf_output_columns;
// Containers for the raw pointers to the gdf_columns
std::vector<gdf_column*> raw_gdf_input_columns;
std::vector<gdf_column*> raw_gdf_output_columns;
HashPartitionTest()
{
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~HashPartitionTest()
{
}
void create_input( size_t num_rows, size_t max_value,
bool print = false)
{
initialize_tuple(input_columns, num_rows, max_value);
initialize_tuple(output_columns, num_rows, max_value);
gdf_input_columns = initialize_gdf_columns(input_columns);
gdf_output_columns = initialize_gdf_columns(output_columns);
// Fill vector of raw pointers to gdf_columns
for(auto const& c : gdf_input_columns){
this->raw_gdf_input_columns.push_back(c.get());
}
for(auto const& c : gdf_output_columns){
this->raw_gdf_output_columns.push_back(c.get());
}
if(print)
{
std::cout << "Input column(s) created. Size: "
<< std::get<0>(input_columns).size() << std::endl;
print_tuple(input_columns);
}
}
std::vector<int> compute_gdf_result(const int num_partitions, bool print = false)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_error result_error{GDF_SUCCESS};
gdf_column ** gdf_input_columns = raw_gdf_input_columns.data();
gdf_column ** gdf_output_columns = raw_gdf_output_columns.data();
std::vector<int> partition_offsets(num_partitions,0);
result_error = gdf_hash_partition(num_columns,
gdf_input_columns,
this->cols_to_hash.data(),
this->num_cols_to_hash,
num_partitions,
gdf_output_columns,
partition_offsets.data(),
gdf_hash_function);
EXPECT_EQ(GDF_SUCCESS, result_error);
if(print)
{
std::cout << "Partition offsets: ";
for(int i = 0; i < num_partitions; ++i)
{
std::cout << partition_offsets[i] << " ";
}
std::cout << std::endl;
}
return partition_offsets;
}
void verify_gdf_result(int num_partitions, std::vector<int> partition_offsets, bool print = false)
{
std::vector<gdf_column*> gdf_cols_to_hash;
for(int i = 0; i < num_cols_to_hash; ++i)
{
gdf_cols_to_hash.push_back(raw_gdf_output_columns[cols_to_hash[i]]);
}
// Create a table from the gdf output of only the columns that were hashed
std::unique_ptr< gdf_table<int> > table_to_hash{new gdf_table<int>(num_cols_to_hash, gdf_cols_to_hash.data())};
Vector<int> row_partition_numbers(table_to_hash->get_column_length());
// Compute the partition number for every row in the result
switch(gdf_hash_function)
{
case GDF_HASH_MURMUR3:
{
thrust::tabulate(thrust::device,
row_partition_numbers.begin(),
row_partition_numbers.end(),
row_partition_mapper<MurmurHash3_32,int>(*table_to_hash,num_partitions));
break;
}
case GDF_HASH_IDENTITY:
{
thrust::tabulate(thrust::device,
row_partition_numbers.begin(),
row_partition_numbers.end(),
row_partition_mapper<IdentityHash,int>(*table_to_hash,num_partitions));
break;
}
default:
std::cerr << "Invalid GDF hash function.\n";
}
std::vector<int> host_row_partition_numbers(table_to_hash->get_column_length());
cudaMemcpy(host_row_partition_numbers.data(),
row_partition_numbers.data().get(),
table_to_hash->get_column_length() * sizeof(int),
cudaMemcpyDeviceToHost);
if(print)
{
std::cout << "Row partition numbers:\n";
std::copy(host_row_partition_numbers.begin(),
host_row_partition_numbers.end(),
std::ostream_iterator<int>(std::cout, ", "));
std::cout << std::endl;
}
// Check that the partition number for every row is correct
for(int partition_number = 0; partition_number < num_partitions; ++partition_number)
{
const int partition_start = partition_offsets[partition_number];
int partition_stop{0};
if(partition_number < (num_partitions - 1))
{
partition_stop = partition_offsets[partition_number + 1];
}
// The end of the last partition is the end of the table
else
{
partition_stop = table_to_hash->get_column_length();
}
// Everything in the current partition should have the same partition
// number
for(int i = partition_start; i < partition_stop; ++i)
{
EXPECT_EQ(partition_number, host_row_partition_numbers[i]) << "Partition number for row: " << i << " doesn't match!";
}
}
}
};
template< typename tuple_of_vectors,
gdf_hash_func hash,
int... cols>
struct TestParameters
{
static_assert((std::tuple_size<tuple_of_vectors>::value >= sizeof...(cols)),
"The number of columns to hash must be less than or equal to the total number of columns.");
// The tuple of vectors that determines the number and types of the columns
using multi_column_t = tuple_of_vectors;
// The hash function to use
constexpr static const gdf_hash_func gdf_hash_function = hash;
// The number of columns to hash
constexpr static const int num_cols_to_hash{sizeof...(cols)};
// The indices of the columns that will be hashed to determine the partitions
constexpr static const std::array<int, sizeof...(cols)> cols_to_hash{{cols...}};
};
// Using Google Tests "Type Parameterized Tests"
// Every test defined as TYPED_TEST(HashPartitionTest, *) will be run once for every instance of
// TestParameters defined below
// The number and types of columns are determined by the number and types of vectors
// in the VTuple<...>
// The hash function to be used is determined by the gdf_hash_func enum
// The columns to be hashed to determine the partition assignment are the last N integer template
// arguments, where N <= the number of columns specified in the VTuple
typedef ::testing::Types< TestParameters< VTuple<int32_t>, GDF_HASH_IDENTITY, 0 >,
TestParameters< VTuple<int32_t, int32_t>, GDF_HASH_MURMUR3, 0, 1>,
TestParameters< VTuple<float, double>, GDF_HASH_MURMUR3, 1>,
TestParameters< VTuple<int64_t, int32_t>, GDF_HASH_MURMUR3, 1>,
TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>,
TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_IDENTITY, 2, 3>,
TestParameters< VTuple<uint32_t, double, int32_t, double>, GDF_HASH_MURMUR3, 0, 2, 3>,
TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_MURMUR3, 1, 3>,
TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>,
TestParameters< VTuple<float, int32_t>, GDF_HASH_MURMUR3, 0>
>Implementations;
TYPED_TEST_CASE(HashPartitionTest, Implementations);
TYPED_TEST(HashPartitionTest, ExampleTest)
{
const int num_partitions = 5;
this->create_input(100, 100);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, OnePartition)
{
const int num_partitions = 1;
this->create_input(100000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, TenPartitions)
{
const int num_partitions = 10;
this->create_input(1000000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, EightPartitions)
{
const int num_partitions = 8;
this->create_input(1000000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
TYPED_TEST(HashPartitionTest, 257Partitions)
{
const int num_partitions = 257;
this->create_input(1000000, 1000);
std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions);
this->verify_gdf_result(num_partitions, partition_offsets);
}
|
fc840e2328efa600be7ad461d2b32383295d3fef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "helper.h"
#include <iostream>
#include <hip/hip_runtime.h>
// TODO: cpp11
double getSeconds() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}
void checkError(hipError_t err, const char *msg) {
if (err != hipSuccess) {
std::cout << hipGetErrorString(err) << " " << msg << std::endl;
exit(-1);
}
}
// Return an array of size 27 with indices of neighbors
// neighbors* must be of length 27 ints
__host__ __device__ void get_neighbors(const int index, int *neighbors, const Params *params) {
int cell_id2 = index / (params->cells0 * params->cells1);
int cell_id1 = (index - cell_id2 * (params->cells0 * params->cells1)) / params->cells0;
int cell_id0 = index - cell_id2 * (params->cells0 * params->cells1) - cell_id1 * params->cells0;
if (cell_id0 + cell_id1 * params->cells0 + cell_id2 * (params->cells0 * params->cells1) != index) printf("error indexing \n");
int neighbour_id0, neighbour_id1, neighbour_id2;
int counter = 0;
for (int offset2 = -1; offset2 <= 1; ++offset2) {
neighbour_id2 = cell_id2 + offset2;
if (cell_id2 + offset2 < 0) {
neighbour_id2 = params->cells2 - 1;
} else if (cell_id2 + offset2 >= params->cells2) {
neighbour_id2 = 0;
}
for (int offset1 = -1; offset1 <= 1; ++offset1) {
neighbour_id1 = cell_id1 + offset1;
if (cell_id1 + offset1 < 0) {
neighbour_id1 = params->cells1 - 1;
} else if (cell_id1 + offset1 >= params->cells1) {
neighbour_id1 = 0;
}
for (int offset0 = -1; offset0 <= 1; ++offset0) {
neighbour_id0 = cell_id0 + offset0;
if (cell_id0 + offset0 < 0) {
neighbour_id0 = params->cells0 - 1;
} else if (cell_id0 + offset0 >= params->cells0) {
neighbour_id0 = 0;
}
neighbors[counter++] = neighbour_id0 + neighbour_id1 * params->cells0 + neighbour_id2 * (params->cells0 * params->cells1);
}
}
}
}
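// Cells are linearised as index = id0 + id1*cells0 + id2*cells0*cells1; the
// triple loop above visits the 3x3x3 block around the cell (the cell itself
// included) with periodic wrap-around, so every cell reports exactly 27
// neighbour indices (with repeats when the grid is fewer than 3 cells wide in
// a direction).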
__host__ __device__ int calc_cell_index(const real x0, const real x1, const real x2, const Params *params) {
int cell_id0 = (x0 - params->x0_min) / params->length0;
int cell_id1 = (x1 - params->x1_min) / params->length1;
int cell_id2 = (x2 - params->x2_min) / params->length2;
if (cell_id2 < 0 || cell_id0 < 0 || cell_id1 < 0) printf("error in cell index calc\n");
return cell_id2 * (params->cells0 * params->cells1) + cell_id1 * (params->cells0) + cell_id0;
}
__host__ __device__ void update_pos(const int &id, const Params *params, Particle *particles) {
real x, y, z;
if (particles[id].m != -1.) {
x = particles[id].x0 + params->timestep_length * particles[id].v0 +
(particles[id].force0 * pow(params->timestep_length, 2.)) / (2. * particles[id].m);
if (x < params->x0_min) {
particles[id].x0 = x + (params->x0_max - params->x0_min);
} else if (x > params->x0_max) {
particles[id].x0 = x - (params->x0_max - params->x0_min);
} else {
particles[id].x0 = x;
}
y = particles[id].x1 + params->timestep_length * particles[id].v1 +
(particles[id].force1 * pow(params->timestep_length, 2.)) / (2. * particles[id].m);
if (y < params->x1_min) {
particles[id].x1 = y + (params->x1_max - params->x1_min);
} else if (y > params->x1_max) {
particles[id].x1 = y - (params->x1_max - params->x1_min);
} else {
particles[id].x1 = y;
}
z = particles[id].x2 + params->timestep_length * particles[id].v2 +
(particles[id].force2 * pow(params->timestep_length, 2.)) / (2. * particles[id].m);
if (z < params->x2_min) {
particles[id].x2 = z + (params->x2_max - params->x2_min);
} else if (z > params->x2_max) {
particles[id].x2 = z - (params->x2_max - params->x2_min);
} else {
particles[id].x2 = z;
}
}
particles[id].force0_old = particles[id].force0;
particles[id].force1_old = particles[id].force1;
particles[id].force2_old = particles[id].force2;
}
__host__ __device__ void calc_velocity(const int &id, const Params *params, Particle *particles) {
if (particles[id].m != -1.) {
particles[id].v0 =
particles[id].v0 + ((particles[id].force0_old + particles[id].force0) * params->timestep_length) / (2. * particles[id].m);
particles[id].v1 =
particles[id].v1 + ((particles[id].force1_old + particles[id].force1) * params->timestep_length) / (2. * particles[id].m);
particles[id].v2 =
particles[id].v2 + ((particles[id].force2_old + particles[id].force2) * params->timestep_length) / (2. * particles[id].m);
}
}
__host__ __device__ void calc_force(const int id, const Params *params, Particle *particles, const int *linked_cells, const int *linked_particles) {
int neighbors[27];
particles[id].force0 = 0;
particles[id].force1 = 0;
particles[id].force2 = 0;
// Get current cell index of this particle
int cell_index = calc_cell_index((particles[id].x0), (particles[id].x1), (particles[id].x2), params);
// Get the 27 indices of the neighboring cells
get_neighbors(cell_index, neighbors, params);
// Loop over neighbour particles (in neighbour cells)
for (int cell = 0; cell < 27; ++cell) {
int id_b = linked_cells[neighbors[cell]]; // index of first particle in
// linked list
while (id_b != -1) {
if (id_b != id) {
//add_collision_force(params, particles, id, id_b);
add_lj_force(params, particles, id, id_b);
}
id_b = linked_particles[id_b];
}
}
/*
// Brute
for (int id_b = 0; id_b < params->num_part; id_b++) {
if (id_b != id) add_lj_force(params, particles, id, id_b);
}
*/
}
__host__ __device__ real ljpotential(const real &n, const real& sigma, const real& epsilon) {
return (24. * epsilon) / pow(n, 2.) * pow(sigma / n, 6.) * (2. * pow(sigma / n, 6.) - 1.);
}
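// ljpotential(r) returns the scalar factor of the Lennard-Jones force,
// F(r)/r = 24*eps/r^2 * (sigma/r)^6 * (2*(sigma/r)^6 - 1), i.e. -dU/dr / r for
// U(r) = 4*eps*((sigma/r)^12 - (sigma/r)^6); multiplying by a displacement
// component, as add_lj_force does, gives that component of the force.
// Example (assuming sigma = eps = 1): the factor vanishes at r = 2^(1/6), the
// potential minimum, and is negative (attractive) for larger r.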
__host__ __device__ void add_lj_force(const Params *params, Particle *particles, const int &id, const int &i) {
real r0, r1, r2;
real dist0 = params->x0_max - params->x0_min;
real dist1 = params->x1_max - params->x1_min;
real dist2 = params->x2_max - params->x2_min;
real r_ij;
real lenard;
r0 = particles[id].x0 - particles[i].x0;
r1 = particles[id].x1 - particles[i].x1;
r2 = particles[id].x2 - particles[i].x2;
// Minimum image criteria
if (r0 > dist0 * 0.5) r0 = r0 - dist0;
if (r1 > dist1 * 0.5) r1 = r1 - dist1;
if (r2 > dist2 * 0.5) r2 = r2 - dist2;
if (r0 <= -dist0 * 0.5) r0 = r0 + dist0;
if (r1 <= -dist1 * 0.5) r1 = r1 + dist1;
if (r2 <= -dist2 * 0.5) r2 = r2 + dist2;
r_ij = sqrt(r0*r0 + r1*r1 + r2*r2);
if(r_ij <= params->r_cut){
lenard = ljpotential(r_ij, params->sigma, params->epsilon);
particles[id].force0 += lenard*r0;
particles[id].force1 += lenard*r1;
particles[id].force2 += lenard*r2;
}
}
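// The minimum-image checks in add_lj_force fold each displacement component
// into (-L/2, L/2] so a particle interacts with the nearest periodic image of
// its neighbour; this is only consistent when r_cut is at most half the
// smallest box edge, the usual assumption for cell-list codes like this one.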
|
fc840e2328efa600be7ad461d2b32383295d3fef.cu
|
#include "helper.h"
#include <iostream>
#include <cuda_runtime.h>
// TODO: cpp11
double getSeconds() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}
void checkError(cudaError_t err, const char *msg) {
if (err != cudaSuccess) {
std::cout << cudaGetErrorString(err) << " " << msg << std::endl;
exit(-1);
}
}
// Return an array of size 27 with indices of neighbors
// neighbors* must be of length 27 ints
__host__ __device__ void get_neighbors(const int index, int *neighbors, const Params *params) {
int cell_id2 = index / (params->cells0 * params->cells1);
int cell_id1 = (index - cell_id2 * (params->cells0 * params->cells1)) / params->cells0;
int cell_id0 = index - cell_id2 * (params->cells0 * params->cells1) - cell_id1 * params->cells0;
if (cell_id0 + cell_id1 * params->cells0 + cell_id2 * (params->cells0 * params->cells1) != index) printf("error indexing \n");
int neighbour_id0, neighbour_id1, neighbour_id2;
int counter = 0;
for (int offset2 = -1; offset2 <= 1; ++offset2) {
neighbour_id2 = cell_id2 + offset2;
if (cell_id2 + offset2 < 0) {
neighbour_id2 = params->cells2 - 1;
} else if (cell_id2 + offset2 >= params->cells2) {
neighbour_id2 = 0;
}
for (int offset1 = -1; offset1 <= 1; ++offset1) {
neighbour_id1 = cell_id1 + offset1;
if (cell_id1 + offset1 < 0) {
neighbour_id1 = params->cells1 - 1;
} else if (cell_id1 + offset1 >= params->cells1) {
neighbour_id1 = 0;
}
for (int offset0 = -1; offset0 <= 1; ++offset0) {
neighbour_id0 = cell_id0 + offset0;
if (cell_id0 + offset0 < 0) {
neighbour_id0 = params->cells0 - 1;
} else if (cell_id0 + offset0 >= params->cells0) {
neighbour_id0 = 0;
}
neighbors[counter++] = neighbour_id0 + neighbour_id1 * params->cells0 + neighbour_id2 * (params->cells0 * params->cells1);
}
}
}
}
__host__ __device__ int calc_cell_index(const real x0, const real x1, const real x2, const Params *params) {
int cell_id0 = (x0 - params->x0_min) / params->length0;
int cell_id1 = (x1 - params->x1_min) / params->length1;
int cell_id2 = (x2 - params->x2_min) / params->length2;
if (cell_id2 < 0 || cell_id0 < 0 || cell_id1 < 0) printf("error in cell index calc\n");;
return cell_id2 * (params->cells0 * params->cells1) + cell_id1 * (params->cells0) + cell_id0;
}
__host__ __device__ void update_pos(const int &id, const Params *params, Particle *particles) {
real x, y, z;
if (particles[id].m != -1.) {
x = particles[id].x0 + params->timestep_length * particles[id].v0 +
(particles[id].force0 * pow(params->timestep_length, 2.)) / (2. * particles[id].m);
if (x < params->x0_min) {
particles[id].x0 = x + (params->x0_max - params->x0_min);
} else if (x > params->x0_max) {
particles[id].x0 = x - (params->x0_max - params->x0_min);
} else {
particles[id].x0 = x;
}
y = particles[id].x1 + params->timestep_length * particles[id].v1 +
(particles[id].force1 * pow(params->timestep_length, 2.)) / (2. * particles[id].m);
if (y < params->x1_min) {
particles[id].x1 = y + (params->x1_max - params->x1_min);
} else if (y > params->x1_max) {
particles[id].x1 = y - (params->x1_max - params->x1_min);
} else {
particles[id].x1 = y;
}
z = particles[id].x2 + params->timestep_length * particles[id].v2 +
(particles[id].force2 * pow(params->timestep_length, 2.)) / (2. * particles[id].m);
if (z < params->x2_min) {
particles[id].x2 = z + (params->x2_max - params->x2_min);
} else if (z > params->x2_max) {
particles[id].x2 = z - (params->x2_max - params->x2_min);
} else {
particles[id].x2 = z;
}
}
particles[id].force0_old = particles[id].force0;
particles[id].force1_old = particles[id].force1;
particles[id].force2_old = particles[id].force2;
}
__host__ __device__ void calc_velocity(const int &id, const Params *params, Particle *particles) {
if (particles[id].m != -1.) {
particles[id].v0 =
particles[id].v0 + ((particles[id].force0_old + particles[id].force0) * params->timestep_length) / (2. * particles[id].m);
particles[id].v1 =
particles[id].v1 + ((particles[id].force1_old + particles[id].force1) * params->timestep_length) / (2. * particles[id].m);
particles[id].v2 =
particles[id].v2 + ((particles[id].force2_old + particles[id].force2) * params->timestep_length) / (2. * particles[id].m);
}
}
__host__ __device__ void calc_force(const int id, const Params *params, Particle *particles, const int *linked_cells, const int *linked_particles) {
int neighbors[27];
particles[id].force0 = 0;
particles[id].force1 = 0;
particles[id].force2 = 0;
// Get current cell index of this particle
int cell_index = calc_cell_index((particles[id].x0), (particles[id].x1), (particles[id].x2), params);
// Get the 27 indices of the neighboring cells
get_neighbors(cell_index, neighbors, params);
// Loop over neighbour particles (in neighbour cells)
for (int cell = 0; cell < 27; ++cell) {
int id_b = linked_cells[neighbors[cell]]; // index of first particle in
// linked list
while (id_b != -1) {
if (id_b != id) {
//add_collision_force(params, particles, id, id_b);
add_lj_force(params, particles, id, id_b);
}
id_b = linked_particles[id_b];
}
}
/*
// Brute
for (int id_b = 0; id_b < params->num_part; id_b++) {
if (id_b != id) add_lj_force(params, particles, id, id_b);
}
*/
}
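// Returns the Lennard-Jones force magnitude divided by r, so that multiplying by the
// displacement components r0/r1/r2 in add_lj_force yields the force vector components.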
__host__ __device__ real ljpotential(const real &n, const real& sigma, const real& epsilon) {
return (24. * epsilon) / pow(n, 2.) * pow(sigma / n, 6.) * (2. * pow(sigma / n, 6.) - 1.);
}
__host__ __device__ void add_lj_force(const Params *params, Particle *particles, const int &id, const int &i) {
real r0, r1, r2;
real dist0 = params->x0_max - params->x0_min;
real dist1 = params->x1_max - params->x1_min;
real dist2 = params->x2_max - params->x2_min;
real r_ij;
real lenard;
r0 = particles[id].x0 - particles[i].x0;
r1 = particles[id].x1 - particles[i].x1;
r2 = particles[id].x2 - particles[i].x2;
// Minimum image criteria
if (r0 > dist0 * 0.5) r0 = r0 - dist0;
if (r1 > dist1 * 0.5) r1 = r1 - dist1;
if (r2 > dist2 * 0.5) r2 = r2 - dist2;
if (r0 <= -dist0 * 0.5) r0 = r0 + dist0;
if (r1 <= -dist1 * 0.5) r1 = r1 + dist1;
if (r2 <= -dist2 * 0.5) r2 = r2 + dist2;
r_ij = sqrt(r0*r0 + r1*r1 + r2*r2);
if(r_ij <= params->r_cut){
lenard = ljpotential(r_ij, params->sigma, params->epsilon);
particles[id].force0 += lenard*r0;
particles[id].force1 += lenard*r1;
particles[id].force2 += lenard*r2;
}
}
|
699eedf9bef0d567821f0c0a38adc2fdfbd0b774.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shiftRightPixels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int16_t *bayImg = NULL;
hipMalloc(&bayImg, XSIZE*YSIZE*sizeof(int16_t));
size_t width = XSIZE;
size_t height = YSIZE;
int bppMult = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((shiftRightPixels), dim3(gridBlock), dim3(threadBlock), 0, 0, bayImg, width, height, bppMult);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((shiftRightPixels), dim3(gridBlock), dim3(threadBlock), 0, 0, bayImg, width, height, bppMult);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((shiftRightPixels), dim3(gridBlock), dim3(threadBlock), 0, 0, bayImg, width, height, bppMult);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
699eedf9bef0d567821f0c0a38adc2fdfbd0b774.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shiftRightPixels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int16_t *bayImg = NULL;
cudaMalloc(&bayImg, XSIZE*YSIZE*sizeof(int16_t));
size_t width = XSIZE;
size_t height = YSIZE;
int bppMult = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shiftRightPixels<<<gridBlock,threadBlock>>>(bayImg,width,height,bppMult);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shiftRightPixels<<<gridBlock,threadBlock>>>(bayImg,width,height,bppMult);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shiftRightPixels<<<gridBlock,threadBlock>>>(bayImg,width,height,bppMult);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e63cdd3deb5370c4c4bbccb04523749073607ec4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* utilities.cu
*
* Created on: Jan 21, 2012
* Author: C. Richard Fisel
*/
#include <hip/hip_runtime.h>
#include "constants.h"
#include "utilities.h"
//__forceinline__
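// Single, non-blocking lock attempt: returns true only if this call atomically flipped the
// agent's lock bit from 0 to 1; callers must retry or skip the agent on failure.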
__device__ bool lock_agent(int iAgentID, int* piaBits, const int iPopulation)
{
bool lockSuccess = false;
if (iAgentID > -1 && iAgentID < iPopulation) {
int iTemp = piaBits[iAgentID];
// if agent is unlocked...
if ((iTemp&agentLockMask) == 0) {
// lock if possible
int iLocked = atomicCAS(&(piaBits[iAgentID]),iTemp,iTemp|agentLockMask);
// test if the lock worked
if (iLocked == iTemp) {
lockSuccess = true;
}
}
} else {
printf("Error: bad agent id %d!\n",iAgentID);
}
return lockSuccess;
}
//__forceinline__
__device__ void unlock_agent(int iAgentID, int* piaBits, const int iPopulation)
{
if (iAgentID > -1 && iAgentID < iPopulation) {
// make a copy of the agent's bits, but indicating unlocked
int iTemp = piaBits[iAgentID];
iTemp &= ~agentLockMask;
// switch unlocked copy for global value
int iUnlocked = atomicExch(&(piaBits[iAgentID]),iTemp);
} else {
printf("Error: bad agent id %d!\n",iAgentID);
}
return;
}
//__forceinline__
__device__ bool lock_location(int iAddy, int* pigBits)
{
bool lockSuccess = false;
if (iAddy < GRID_SIZE*GRID_SIZE && iAddy > -1) {
// unpack grid bits
int iTemp = pigBits[iAddy];
// test if square is unlocked
if ((iTemp&gridLockMask) == 0) {
// if so, make a copy, but indicating locked
int iTempLocked = iTemp|gridLockMask;
// now lock the address if possible
int iLocked = atomicCAS(&(pigBits[iAddy]),iTemp,iTempLocked);
// test if the lock worked
if (iLocked == iTemp) lockSuccess = true;
}
} else {
printf("Error: bad address %d!\n",iAddy);
}
return lockSuccess;
}
//__forceinline__
__device__ void unlock_location(int iAddy, int* pigBits)
{
if (iAddy < GRID_SIZE*GRID_SIZE && iAddy > -1) {
// unpack grid bits
int iTemp = pigBits[iAddy];
// set to unlocked
iTemp &= ~gridLockMask;
printf("itemp %x pigBits[iAddy] %x\n",iTemp,pigBits[iAddy]);
// switch with the global value
int iLocked = atomicExch(&(pigBits[iAddy]),iTemp);
} else {
printf("Error: bad address %d!\n",iAddy);
}
return;
}
/*
* NOTE: use only when occupancy is previously verified to be positive
* and thread contention for global memory has been suppressed (e.g. by locking)
*/
//__forceinline__
__device__ void remove_resident(int iAgentID, int iAddy, int* pigBits, int* pigResidents)
{
if (iAgentID > -1) {
// copy to local memory
unsigned int iTemp = pigBits[iAddy];
// decrement occupancy by one
iTemp -= occupancyIncrement;
// find match starting at end of list
unsigned short sOcc = (iTemp&occupancyMask)>>occupancyShift;
short k = sOcc;
// remove current id - if not at the end, replace it with the one from the end and store -1 at end
if (pigResidents[iAddy*MAX_OCCUPANCY+k] == iAgentID) {
pigResidents[iAddy*MAX_OCCUPANCY+k] = -1;
} else {
while (k >= 0 && pigResidents[iAddy*MAX_OCCUPANCY+k] != iAgentID) {k--;}
if (k != sOcc) {
pigResidents[iAddy*MAX_OCCUPANCY+k] = pigResidents[iAddy*MAX_OCCUPANCY+sOcc];
pigResidents[iAddy*MAX_OCCUPANCY+sOcc] = -1;
}
}
pigBits[iAddy] = iTemp;
} else {
printf("Error: agent id %d at addy %d is negative!\n",iAgentID,iAddy);
}
return;
}
/*
* NOTE: use only when occupancy is previously verified to be less than MAX_OCCUPANCY
* and thread contention for global memory has been suppressed (e.g. by locking).
* NOTE also that GRID_SIZE is a power of 2.
*/
//__forceinline__
__device__ void add_resident(int iAgentID, int iAddy, int* pigBits, int* pigResidents, short* psaX, short* psaY)
{
if (iAgentID > -1) {
// copy to local memory
unsigned int iTemp = pigBits[iAddy];
// make sure we are replacing an "empty" placemarker
if (pigResidents[iAddy*MAX_OCCUPANCY+((iTemp&occupancyMask)>>occupancyShift)] == -1) {
psaX[iAgentID] = iAddy>>log2GRID_SIZE;
psaY[iAgentID] = iAddy&(GRID_SIZE-1);
pigResidents[iAddy*MAX_OCCUPANCY+((iTemp&occupancyMask)>>occupancyShift)] = iAgentID;
// increment occupancy by one
iTemp += occupancyIncrement;
} else {
//otherwise notify about the error
printf ("agent %d replaced %d at addy %d \n",
iAgentID,pigResidents[iAddy*MAX_OCCUPANCY+((iTemp&occupancyMask)>>occupancyShift)],iAddy);
}
pigBits[iAddy] = iTemp;
} else {
printf("Error: agent id %d at addy %d is negative!\n",iAgentID,iAddy);
}
return;
}
|
e63cdd3deb5370c4c4bbccb04523749073607ec4.cu
|
/*
* utilities.cu
*
* Created on: Jan 21, 2012
* Author: C. Richard Fisel
*/
#include <cuda.h>
#include "constants.h"
#include "utilities.h"
//__forceinline__
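// Single, non-blocking lock attempt: returns true only if this call atomically flipped the
// agent's lock bit from 0 to 1; callers must retry or skip the agent on failure.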
__device__ bool lock_agent(int iAgentID, int* piaBits, const int iPopulation)
{
bool lockSuccess = false;
if (iAgentID > -1 && iAgentID < iPopulation) {
int iTemp = piaBits[iAgentID];
// if agent is unlocked...
if ((iTemp&agentLockMask) == 0) {
// lock if possible
int iLocked = atomicCAS(&(piaBits[iAgentID]),iTemp,iTemp|agentLockMask);
// test if the lock worked
if (iLocked == iTemp) {
lockSuccess = true;
}
}
} else {
printf("Error: bad agent id %d!\n",iAgentID);
}
return lockSuccess;
}
//__forceinline__
__device__ void unlock_agent(int iAgentID, int* piaBits, const int iPopulation)
{
if (iAgentID > -1 && iAgentID < iPopulation) {
// make a copy of the agent's bits, but indicating unlocked
int iTemp = piaBits[iAgentID];
iTemp &= ~agentLockMask;
// switch unlocked copy for global value
int iUnlocked = atomicExch(&(piaBits[iAgentID]),iTemp);
} else {
printf("Error: bad agent id %d!\n",iAgentID);
}
return;
}
//__forceinline__
__device__ bool lock_location(int iAddy, int* pigBits)
{
bool lockSuccess = false;
if (iAddy < GRID_SIZE*GRID_SIZE && iAddy > -1) {
// unpack grid bits
int iTemp = pigBits[iAddy];
// test if square is unlocked
if ((iTemp&gridLockMask) == 0) {
// if so, make a copy, but indicating locked
int iTempLocked = iTemp|gridLockMask;
// now lock the address if possible
int iLocked = atomicCAS(&(pigBits[iAddy]),iTemp,iTempLocked);
// test if the lock worked
if (iLocked == iTemp) lockSuccess = true;
}
} else {
printf("Error: bad address %d!\n",iAddy);
}
return lockSuccess;
}
//__forceinline__
__device__ void unlock_location(int iAddy, int* pigBits)
{
if (iAddy < GRID_SIZE*GRID_SIZE && iAddy > -1) {
// unpack grid bits
int iTemp = pigBits[iAddy];
// set to unlocked
iTemp &= ~gridLockMask;
printf("itemp %x pigBits[iAddy] %x\n",iTemp,pigBits[iAddy]);
// switch with the global value
int iLocked = atomicExch(&(pigBits[iAddy]),iTemp);
} else {
printf("Error: bad address %d!\n",iAddy);
}
return;
}
/*
* NOTE: use only when occupancy is previously verified to be positive
* and thread contention for global memory has been suppressed (e.g. by locking)
*/
//__forceinline__
__device__ void remove_resident(int iAgentID, int iAddy, int* pigBits, int* pigResidents)
{
if (iAgentID > -1) {
// copy to local memory
unsigned int iTemp = pigBits[iAddy];
// decrement occupancy by one
iTemp -= occupancyIncrement;
// find match starting at end of list
unsigned short sOcc = (iTemp&occupancyMask)>>occupancyShift;
short k = sOcc;
// remove current id - if not at the end, replace it with the one from the end and store -1 at end
if (pigResidents[iAddy*MAX_OCCUPANCY+k] == iAgentID) {
pigResidents[iAddy*MAX_OCCUPANCY+k] = -1;
} else {
while (k >= 0 && pigResidents[iAddy*MAX_OCCUPANCY+k] != iAgentID) {k--;}
if (k != sOcc) {
pigResidents[iAddy*MAX_OCCUPANCY+k] = pigResidents[iAddy*MAX_OCCUPANCY+sOcc];
pigResidents[iAddy*MAX_OCCUPANCY+sOcc] = -1;
}
}
pigBits[iAddy] = iTemp;
} else {
printf("Error: agent id %d at addy %d is negative!\n",iAgentID,iAddy);
}
return;
}
/*
* NOTE: use only when occupancy is previously verified to be less than MAX_OCCUPANCY
* and thread contention for global memory has been suppressed (e.g. by locking).
* NOTE also that GRID_SIZE is a power of 2.
*/
//__forceinline__
__device__ void add_resident(int iAgentID, int iAddy, int* pigBits, int* pigResidents, short* psaX, short* psaY)
{
if (iAgentID > -1) {
// copy to local memory
unsigned int iTemp = pigBits[iAddy];
// make sure we are replacing an "empty" placemarker
if (pigResidents[iAddy*MAX_OCCUPANCY+((iTemp&occupancyMask)>>occupancyShift)] == -1) {
psaX[iAgentID] = iAddy>>log2GRID_SIZE;
psaY[iAgentID] = iAddy&(GRID_SIZE-1);
pigResidents[iAddy*MAX_OCCUPANCY+((iTemp&occupancyMask)>>occupancyShift)] = iAgentID;
// increment occupancy by one
iTemp += occupancyIncrement;
} else {
//otherwise notify about the error
printf ("agent %d replaced %d at addy %d \n",
iAgentID,pigResidents[iAddy*MAX_OCCUPANCY+((iTemp&occupancyMask)>>occupancyShift)],iAddy);
}
pigBits[iAddy] = iTemp;
} else {
printf("Error: agent id %d at addy %d is negative!\n",iAgentID,iAddy);
}
return;
}
|
d64d237d9e009ba641afe92d804091a6b3c674ff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Modified by Frost for 1D usage
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
//for pytorch lower 1.8 version
// #include <THH/THH.h>
// #include <THH/THHAtomics.cuh>
// #include <THH/THHDeviceUtils.cuh>
//for pytorch 1.9 version
#include <ATen/ceil_div.h>
#include <ATen/hip/ThrustAllocator.h>
#include <ATen/hip/DeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T linear_interpolate(const T* bottom_data,
const int height,
T t,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
return 0;
}
if (t <= 0) t = 0;
int t_low = (int) t;
int t_high;
// get closest integers to t
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
// get the distance to t
T lt = t - t_low;
T ht = 1. - lt;
// do linear interpolation
T v1 = bottom_data[t_low];
T v2 = bottom_data[t_high];
T w1 = ht, w2 = lt;
T val = (w1 * v1 + w2 * v2);
// printf("Check Linear Interpolate: w1=%f, v1=%f, w2=%f, v2=%f \n", w1, v1, w2, v2);
return val;
}
template <typename T>
__global__ void Align1DForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height,
const int pooled_height,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = index % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
// input: (batchsize, 256, 4Ts)
// output: (bz*Nq, 256, 16); 16 is the number of bins
//rois(bz*Nq,3)
//sampling_ratio=0
// printf("Debug Main Loop: get pt, c, n are %d, %d, %d \n", pt, c, n);
const T* offset_bottom_rois = bottom_rois + n * 3; // take the n-th ROI entry
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start = offset_bottom_rois[1] * spatial_scale;
T roi_end = offset_bottom_rois[2] * spatial_scale;
// printf("Debug roi boundary: w1, w2, is %f, %f \n", roi_start,roi_end,);
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.); // value range 0..T
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height); // ROI length divided by the number of bins
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height; // data of batch roi_batch_ind, channel c; height is 4Ts
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
T output_val = 0.;
for (int it = 0; it < roi_bin_grid; it ++) // e.g., it = 0, 1
{
const T t = roi_start + pt * bin_size + static_cast<T>(it + .5f) * bin_size / static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T val = linear_interpolate(offset_bottom_data, height, t, index);
// printf("Debug linear_interpolate: input=height:%d, t:%f, ... ; output=val:%f \n", height, t, val);
output_val += val;
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void linear_interpolate_gradient(
const int height,
T t,
T & w1, T & w2,
int & t_low, int & t_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
w1 = w2 = 0.;
t_low = t_high = -1;
return;
}
if (t <= 0) t = 0;
t_low = (int) t;
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
T lt = t - t_low;
T ht = 1. - lt;
// T val = (w1 * v1 + w2 * v2);
// T w1 = ht, w2 = lt;
w1 = ht , w2 = lt;
return;
}
template <typename T>
__global__ void Align1DBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height,
const int pooled_height,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = (index ) % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 3;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start= offset_bottom_rois[1] * spatial_scale;
T roi_end= offset_bottom_rois[2] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.);
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height;
int top_offset = (n * channels + c) * pooled_height;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[pt];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid= (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
for (int it = 0; it < roi_bin_grid; it ++) // e.g., iy = 0, 1
{
const T t = roi_start+ pt * bin_size+ static_cast<T>(it + .5f) * bin_size/ static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T w1, w2;
int t_low, t_high;
linear_interpolate_gradient(height, t, w1, w2, t_low, t_high, index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
if (t_low >= 0 && t_high >= 0)
{
atomicAdd(offset_bottom_diff + t_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + t_high, static_cast<T>(g2));
} // if
} // it
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor Align_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto output = at::empty({num_rois, channels, pooled_height}, input.options());
auto output_size = num_rois * pooled_height * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 grid(::min(at::ceil_div((long)output_size, 512L), 4096L));
dim3 block(512);
// printf("Debug main function: height:%d\n", height);
if (output.numel() == 0) {
// THCudaCheck(hipGetLastError());
// c10::hip::HIPCachingAllocator::raw_alloc(hipGetLastError());
C10_HIP_CHECK(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "Align1D_forward", [&] {
hipLaunchKernelGGL(( Align1DForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
// THCudaCheck(hipGetLastError());
// c10::hip::HIPCachingAllocator::raw_alloc(hipGetLastError());
C10_HIP_CHECK(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor Align_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int batch_size,
const int channels,
const int height,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 grid(::min(at::ceil_div((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
// THCudaCheck(hipGetLastError());
// c10::hip::HIPCachingAllocator::raw_alloc(hipGetLastError());
C10_HIP_CHECK(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
hipLaunchKernelGGL(( Align1DBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
// THCudaCheck(hipGetLastError());
// c10::hip::HIPCachingAllocator::raw_alloc(hipGetLastError());
C10_HIP_CHECK(hipGetLastError());
return grad_input;
}
|
d64d237d9e009ba641afe92d804091a6b3c674ff.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Modified by Frost for 1D usage
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
//for pytorch lower 1.8 version
// #include <THC/THC.h>
// #include <THC/THCAtomics.cuh>
// #include <THC/THCDeviceUtils.cuh>
//for pytorch 1.9 version
#include <ATen/ceil_div.h>
#include <ATen/cuda/ThrustAllocator.h>
#include <ATen/cuda/DeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T linear_interpolate(const T* bottom_data,
const int height,
T t,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
return 0;
}
if (t <= 0) t = 0;
int t_low = (int) t;
int t_high;
// get closest integers to t
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
// get the distance to t
T lt = t - t_low;
T ht = 1. - lt;
// do linear interpolation
T v1 = bottom_data[t_low];
T v2 = bottom_data[t_high];
T w1 = ht, w2 = lt;
T val = (w1 * v1 + w2 * v2);
// printf("Check Linear Interpolate: w1=%f, v1=%f, w2=%f, v2=%f \n", w1, v1, w2, v2);
return val;
}
template <typename T>
__global__ void Align1DForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height,
const int pooled_height,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = index % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
// input: (batchsize, 256, 4Ts)
// output: (bz*Nq, 256, 16); 16 is the number of bins
//rois(bz*Nq,3)
//sampling_ratio=0
// printf("Debug Main Loop: get pt, c, n are %d, %d, %d \n", pt, c, n);
const T* offset_bottom_rois = bottom_rois + n * 3; // take the n-th ROI entry
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start = offset_bottom_rois[1] * spatial_scale;
T roi_end = offset_bottom_rois[2] * spatial_scale;
// printf("Debug roi boundary: w1, w2, is %f, %f \n", roi_start,roi_end,);
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.); // value range 0..T
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height); // ROI length divided by the number of bins
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height; // data of batch roi_batch_ind, channel c; height is 4Ts
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
T output_val = 0.;
for (int it = 0; it < roi_bin_grid; it ++) // e.g., it = 0, 1
{
const T t = roi_start + pt * bin_size + static_cast<T>(it + .5f) * bin_size / static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T val = linear_interpolate(offset_bottom_data, height, t, index);
// printf("Debug linear_interpolate: input=height:%d, t:%f, ... ; output=val:%f \n", height, t, val);
output_val += val;
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void linear_interpolate_gradient(
const int height,
T t,
T & w1, T & w2,
int & t_low, int & t_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
w1 = w2 = 0.;
t_low = t_high = -1;
return;
}
if (t <= 0) t = 0;
t_low = (int) t;
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
T lt = t - t_low;
T ht = 1. - lt;
// T val = (w1 * v1 + w2 * v2);
// T w1 = ht, w2 = lt;
w1 = ht , w2 = lt;
return;
}
template <typename T>
__global__ void Align1DBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height,
const int pooled_height,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = (index ) % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 3;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start= offset_bottom_rois[1] * spatial_scale;
T roi_end= offset_bottom_rois[2] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.);
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height;
int top_offset = (n * channels + c) * pooled_height;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[pt];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid= (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
for (int it = 0; it < roi_bin_grid; it ++) // e.g., iy = 0, 1
{
const T t = roi_start+ pt * bin_size+ static_cast<T>(it + .5f) * bin_size/ static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T w1, w2;
int t_low, t_high;
linear_interpolate_gradient(height, t, w1, w2, t_low, t_high, index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
if (t_low >= 0 && t_high >= 0)
{
atomicAdd(offset_bottom_diff + t_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + t_high, static_cast<T>(g2));
} // if
} // it
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor Align_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto output = at::empty({num_rois, channels, pooled_height}, input.options());
auto output_size = num_rois * pooled_height * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 grid(std::min(at::ceil_div((long)output_size, 512L), 4096L));
dim3 block(512);
// printf("Debug main function: height:%d\n", height);
if (output.numel() == 0) {
// THCudaCheck(cudaGetLastError());
// c10::cuda::CUDACachingAllocator::raw_alloc(cudaGetLastError());
C10_CUDA_CHECK(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "Align1D_forward", [&] {
Align1DForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
// THCudaCheck(cudaGetLastError());
// c10::cuda::CUDACachingAllocator::raw_alloc(cudaGetLastError());
C10_CUDA_CHECK(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor Align_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int batch_size,
const int channels,
const int height,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 grid(std::min(at::ceil_div((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
// THCudaCheck(cudaGetLastError());
// c10::cuda::CUDACachingAllocator::raw_alloc(cudaGetLastError());
C10_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
Align1DBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
// THCudaCheck(cudaGetLastError());
// c10::cuda::CUDACachingAllocator::raw_alloc(cudaGetLastError());
C10_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
|
3176b9473b5585d34a3fc3e3a2e9e7f54fe73361.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHTensor.hpp>
#include <THH/generic/THHTensorMathPointwise.hip>
#include <THH/THHGenerateFloatType.h>
|
3176b9473b5585d34a3fc3e3a2e9e7f54fe73361.cu
|
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCTensor.hpp>
#include <THC/generic/THCTensorMathPointwise.cu>
#include <THC/THCGenerateFloatType.h>
|
c1344dc57ac1872f04d720cf3a888b315affd2b2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
__global__
void add(unsigned long long int n, float *x, float *y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for(unsigned long long int i = index; i<n; i+= stride)
y[i] = x[i]+ y[i];
}
int main(void)
{
unsigned long long int N= 1<<29;
float *x , *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
for(unsigned long long int i = 0; i<N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
hipLaunchKernelGGL(( add), dim3(1), dim3(256), 0, 0, N, x, y);
hipDeviceSynchronize();
hipFree(x);
hipFree(y);
return 0;
}
|
c1344dc57ac1872f04d720cf3a888b315affd2b2.cu
|
#include <iostream>
#include <math.h>
__global__
void add(unsigned long long int n, float *x, float *y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for(unsigned long long int i = index; i<n; i+= stride)
y[i] = x[i]+ y[i];
}
int main(void)
{
unsigned long long int N= 1<<29;
float *x , *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
for(unsigned long long int i = 0; i<N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
add<<<1, 256>>>(N, x, y);
cudaDeviceSynchronize();
cudaFree(x);
cudaFree(y);
return 0;
}
|
0e2a67eca4f15c2453fbdd3ee3062800f0916c6f.hip
|
// !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include "glm/glm.hpp"
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Kernel that does the initial raycast from the camera.
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) {
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
//standard camera raycast stuff
glm::vec3 E = eye;
glm::vec3 C = view;
glm::vec3 U = up;
float fovx = fov.x;
float fovy = fov.y;
float CD = glm::length(C);
glm::vec3 A = glm::cross(C, U);
glm::vec3 B = glm::cross(A, C);
glm::vec3 M = E+C;
glm::vec3 H = (A*float(CD*tan(fovx*(PI/180))))/float(glm::length(A));
glm::vec3 V = (B*float(CD*tan(-fovy*(PI/180))))/float(glm::length(B));
float sx = (x)/(resolution.x-1);
float sy = (y)/(resolution.y-1);
glm::vec3 P = M + (((2*sx)-1)*H) + (((2*sy)-1)*V);
glm::vec3 PmE = P-E;
glm::vec3 R = E + (float(200)*(PmE))/float(glm::length(PmE));
glm::vec3 direction = glm::normalize(R);
//major performance cliff at this point, TODO: find out why!
ray r;
r.origin = eye;
r.direction = direction;
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if (x<=resolution.x && y<=resolution.y) {
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if (x<=resolution.x && y<=resolution.y) {
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if (color.x>255) {
color.x = 255;
}
if (color.y>255) {
color.y = 255;
}
if (color.z>255) {
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(glm::vec2 resolution, float time, float bounce, cameraData cam, int rayDepth, glm::vec3* colors,
geom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
ray r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
if (x<=resolution.x && y<=resolution.y) {
float MAX_DEPTH = 100000000000000000;
float depth = MAX_DEPTH;
for (int i=0; i<numberOfGeoms; i++) {
glm::vec3 intersectionPoint;
glm::vec3 intersectionNormal;
if (geoms[i].type==SPHERE) {
depth = sphereIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
} else if (geoms[i].type==CUBE) {
depth = boxIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
} else if (geoms[i].type==MESH) {
//triangle tests go here
} else {
//lol?
}
if (depth<MAX_DEPTH && depth>-EPSILON) {
MAX_DEPTH = depth;
colors[index] = materials[geoms[i].materialid].color;
}
}
}
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, cameraData cam, int iterations, material* cudamaterials, int numberOfMaterials, geom* cudageoms, int numberOfGeoms, glm::vec3* cudaimage) {
int traceDepth = 1; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(cam.resolution.x)/float(tileSize)), (int)ceil(float(cam.resolution.y)/float(tileSize)));
//kernel launches
for (int bounce = 1; bounce <= 1; ++bounce) {
hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cam.resolution, (float)iterations, (float)bounce, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials);
}
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, cam.resolution, cudaimage);
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
|
0e2a67eca4f15c2453fbdd3ee3062800f0916c6f.cu
|
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include "glm/glm.hpp"
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Kernel that does the initial raycast from the camera.
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) {
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
//standard camera raycast stuff
glm::vec3 E = eye;
glm::vec3 C = view;
glm::vec3 U = up;
float fovx = fov.x;
float fovy = fov.y;
float CD = glm::length(C);
glm::vec3 A = glm::cross(C, U);
glm::vec3 B = glm::cross(A, C);
glm::vec3 M = E+C;
glm::vec3 H = (A*float(CD*tan(fovx*(PI/180))))/float(glm::length(A));
glm::vec3 V = (B*float(CD*tan(-fovy*(PI/180))))/float(glm::length(B));
float sx = (x)/(resolution.x-1);
float sy = (y)/(resolution.y-1);
glm::vec3 P = M + (((2*sx)-1)*H) + (((2*sy)-1)*V);
glm::vec3 PmE = P-E;
glm::vec3 R = E + (float(200)*(PmE))/float(glm::length(PmE));
glm::vec3 direction = glm::normalize(R);
//major performance cliff at this point, TODO: find out why!
ray r;
r.origin = eye;
r.direction = direction;
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if (x<=resolution.x && y<=resolution.y) {
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if (x<=resolution.x && y<=resolution.y) {
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if (color.x>255) {
color.x = 255;
}
if (color.y>255) {
color.y = 255;
}
if (color.z>255) {
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(glm::vec2 resolution, float time, float bounce, cameraData cam, int rayDepth, glm::vec3* colors,
geom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
ray r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
if (x<=resolution.x && y<=resolution.y) {
float MAX_DEPTH = 100000000000000000;
float depth = MAX_DEPTH;
for (int i=0; i<numberOfGeoms; i++) {
glm::vec3 intersectionPoint;
glm::vec3 intersectionNormal;
if (geoms[i].type==SPHERE) {
depth = sphereIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
} else if (geoms[i].type==CUBE) {
depth = boxIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
} else if (geoms[i].type==MESH) {
//triangle tests go here
} else {
//lol?
}
if (depth<MAX_DEPTH && depth>-EPSILON) {
MAX_DEPTH = depth;
colors[index] = materials[geoms[i].materialid].color;
}
}
}
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, cameraData cam, int iterations, material* cudamaterials, int numberOfMaterials, geom* cudageoms, int numberOfGeoms, glm::vec3* cudaimage) {
int traceDepth = 1; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(cam.resolution.x)/float(tileSize)), (int)ceil(float(cam.resolution.y)/float(tileSize)));
//kernel launches
for (int bounce = 1; bounce <= 1; ++bounce) {
raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(cam.resolution, (float)iterations, (float)bounce, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials);
}
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, cam.resolution, cudaimage);
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("Kernel failed!");
}
|
6464f03c608b35da1e8880a4473c638401e1a8c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __NVCC__
// __device__ volatile int PQ[MAX_NODE];
//K in parallel
template <class U>
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
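// Each of the K queues owns a contiguous slice of PQ of length ceil(N/K);
// front is the start of this thread's slice and PQ[front] its current minimum.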
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge, T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,int* nVFlag,
int N,int E, int K,int dest,
int flagDiff,int dE,
int* diff_off,int* diff_edge,unsigned int* diff_weight ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
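// After Cx values change, re-check every entry of each queue slice and bubble entries that
// became cheaper than their parent back toward the front, restoring the min-heap property.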
template <class U>
__global__ void keepHeapPQ(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
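// Stream-compacts the flagged vertices (nextFlag[id] == 1) into the nextV worklist via an atomic counter.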
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
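// Each of the K threads takes every K-th vertex from nextV, skips vertices already in openList,
// appends the rest to its own queue slice, and sifts them up to keep the slice a min-heap.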
template <class U>
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
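// Keeps the search alive: clears flagEnd if any queue's minimum is still cheaper than the current cost of dest.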
template <class U>
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
//check if at least one min remains, don't end the A*
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
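// After edge deletions: each affected vertex invalidates its cost and parent, then recomputes the
// best parent from the remaining reverse (and diff) edges, skipping candidates that would form a cycle.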
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
//check for the parent and add to nextflag and update the cost
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent
// write in parent read always from old_parent
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
int cost = INT_MAX;
int opt_parent = -1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_weight[start];
bool flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor>0){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no need to lock only single parent so only one node in array so one node per thread
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
start = rev_diff_offset[node];
end = dE;
if(node!=N-1)
end = rev_diff_offset[node+1];
while(start< end){
int p = rev_diff_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_diff_weight[start];
bool flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no need to lock only single parent so only one node in array so one node per thread
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
//write here
if(cost!=INT_MAX){
Cx[node]=cost;
parent[node]=opt_parent;
}
}
}
//add inserted edges to propogate
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
            //array L initialized with 0
            //get the lock for child to update C(x)
            //loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
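//one thread per flagged vertex: relax its outgoing edges (original + diff CSR) and mark improved children for reinsertion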
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
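//one thread per flagged vertex: relax outgoing edges; a child that depended on this vertex and got worse is rebuilt from its reverse edges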
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p == child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
          //array L initialized with 0
          //get the lock for child to update C(x)
          //loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
                    if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
                        Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p ==child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
                                if(ancestor==child){
                                    flag_cycle = true;
                                    break;
                                }
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//do in 1 thread
template <class U>
__global__ void insertDest(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
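//single thread: copy the destination cost into val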
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
#endif
|
6464f03c608b35da1e8880a4473c638401e1a8c8.cu
|
#ifdef __NVCC__
// __device__ volatile int PQ[MAX_NODE];
//K in parallel
template <class U>
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge, T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,int* nVFlag,
int N,int E, int K,int dest,
int flagDiff,int dE,
int* diff_off,int* diff_edge,unsigned int* diff_weight ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
      //array L initialized with 0
      //get the lock for child to update C(x)
      //loop until the lock is acquired
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
        //array L initialized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
template <class U>
__global__ void keepHeapPQ(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
template <class U>
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
template <class U>
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
        //if at least one PQ min is still smaller than Cx[dest], don't end the A* search
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
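//one thread per deleted-edge vertex: reset it, then recompute its cost and parent from the remaining reverse edges (original + diff)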
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
        //reset this vertex, recompute its cost from its remaining parents, and flag it for reinsertion
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent
// write in parent read always from old_parent
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
int cost = INT_MAX;
int opt_parent = -1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor>0){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
          //no lock needed: each vertex appears only once here, so a single thread writes this node
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
start = rev_diff_offset[node];
end = dE;
if(node!=N-1)
end = rev_diff_offset[node+1];
while(start< end){
int p = rev_diff_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_diff_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
          //no lock needed: each vertex appears only once here, so a single thread writes this node
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
//write here
if(cost!=INT_MAX){
Cx[node]=cost;
parent[node]=opt_parent;
}
}
}
//add inserted edges to propagate
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
            //array L initialized with 0
            //get the lock for child to update C(x)
            //loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
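//one thread per flagged vertex: relax its outgoing edges (original + diff CSR) and mark improved children for reinsertion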
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
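//one thread per flagged vertex: relax outgoing edges; a child that depended on this vertex and got worse is rebuilt from its reverse edges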
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p == child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
          //array L initialized with 0
          //get the lock for child to update C(x)
          //loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
                    if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
                        Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p ==child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
                                if(ancestor==child){
                                    flag_cycle = true;
                                    break;
                                }
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//do in 1 thread
template <class U>
__global__ void insertDest(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
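//single thread: copy the destination cost into val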
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
#endif
|
e03eaf0f8514586932d0c8df19f5cb27fb4f818e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vecAdd(unsigned int *A_d, unsigned int *B_d, unsigned int *C_d, int WORK_SIZE) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate vecADD kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < WORK_SIZE)
C_d[id] = A_d[id] + B_d[id];
}
|
e03eaf0f8514586932d0c8df19f5cb27fb4f818e.cu
|
#include "includes.h"
__global__ void vecAdd(unsigned int *A_d, unsigned int *B_d, unsigned int *C_d, int WORK_SIZE) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate vecADD kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < WORK_SIZE)
C_d[id] = A_d[id] + B_d[id];
}
|
94b1af223063afa630216eb227be9d5f2a39dc15.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
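// reduce the per-channel partial outputs in d_o over all num_ch channels into d_r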
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
for(int i=0; i<num_ch; i++)
{ int row = threadIdx.y; int col = threadIdx.x;
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
}
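// one thread per output element: accumulate the wt_width x wt_width window product for one (image, filter, channel) block, then clamp negatives to zero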
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{float prod;
int row = threadIdx.y; int col = threadIdx.x;
if((row<height) && (col<width)){
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
// for(int k=0; k<num_ch; k++){
float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)];
float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)];
prod = ip*wt;
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += prod;
//printf("the value of out is %f for index %d,%d\n",)
//}
}
}
if(d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col]<0)
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col]=0;
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
  /*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (c+1);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = 0;
else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
dim3 dimGridRed(batch_size,256,1);
dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
 //gpu_start = clock();
 hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
 hipDeviceSynchronize();
 hipLaunchKernelGGL(( red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//hipLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//hipDeviceSynchronize();
hipMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
hipFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
94b1af223063afa630216eb227be9d5f2a39dc15.cu
|
#include <stdio.h>
#include <iostream>
#include <cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
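// reduce the per-channel partial outputs in d_o over all num_ch channels into d_r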
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
for(int i=0; i<num_ch; i++)
{ int row = threadIdx.y; int col = threadIdx.x;
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
}
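// one thread per output element: accumulate the wt_width x wt_width window product for one (image, filter, channel) block, then clamp negatives to zero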
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{float prod;
int row = threadIdx.y; int col = threadIdx.x;
if((row<height) && (col<width)){
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
// for(int k=0; k<num_ch; k++){
float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)];
float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)];
prod = ip*wt;
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += prod;
//printf("the value of out is %f for index %d,%d\n",)
//}
}
}
if(d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col]<0)
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col]=0;
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
  /*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (c+1);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = 0;
else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
dim3 dimGridRed(batch_size,256,1);
dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
cudaDeviceSynchronize();
red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//cudaDeviceSynchronize();
cudaMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
cudaFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
fe2312b5e81dc15cec89ca8d98ec50f530c2a169.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "depth_filter/cuda/allocator.cuh"
using namespace depth_filter;
using namespace std;
template <typename T>
__global__
void test_2d_class(AbstractAllocator<T> *dev_ptr) {
size_t tidx = blockIdx.x*blockDim.x+threadIdx.x;
size_t tidy = blockIdx.y*blockDim.y+threadIdx.y;
if ((tidx < dev_ptr->width()) && (tidy < dev_ptr->height()))
(*dev_ptr)(tidx, tidy) = (*dev_ptr)(tidx, tidy)*tidx*tidy;
}
int main() {
float data[] = {1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
12, 13, 14, 15};
AbstractAllocator<float> a(4, 4);
a.set_data(data);
float dev2host[16];
a.get_data(dev2host);
for (size_t i = 0; i < 16; ++i)
printf("i = %lu\t:\t%f\n", i, dev2host[i]);
printf("\n");
dim3 grid_size((4+16-1)/4, (4+16-1)/4);
dim3 block_size(16, 16);
hipLaunchKernelGGL(( test_2d_class), dim3(grid_size), dim3(block_size), 0, 0, a.ptr_);
hipDeviceSynchronize();
a.get_data(dev2host);
for (size_t i = 0; i < 16; ++i)
printf("i = %lu\t:\t%f\n", i, dev2host[i]);
printf("\n");
}
|
fe2312b5e81dc15cec89ca8d98ec50f530c2a169.cu
|
#include "depth_filter/cuda/allocator.cuh"
using namespace depth_filter;
using namespace std;
template <typename T>
__global__
void test_2d_class(AbstractAllocator<T> *dev_ptr) {
size_t tidx = blockIdx.x*blockDim.x+threadIdx.x;
size_t tidy = blockIdx.y*blockDim.y+threadIdx.y;
if ((tidx < dev_ptr->width()) && (tidy < dev_ptr->height()))
(*dev_ptr)(tidx, tidy) = (*dev_ptr)(tidx, tidy)*tidx*tidy;
}
int main() {
float data[] = {1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
12, 13, 14, 15};
AbstractAllocator<float> a(4, 4);
a.set_data(data);
float dev2host[16];
a.get_data(dev2host);
for (size_t i = 0; i < 16; ++i)
printf("i = %lu\t:\t%f\n", i, dev2host[i]);
printf("\n");
dim3 grid_size((4+16-1)/4, (4+16-1)/4);
dim3 block_size(16, 16);
test_2d_class<<<grid_size, block_size>>>(a.ptr_);
cudaDeviceSynchronize();
a.get_data(dev2host);
for (size_t i = 0; i < 16; ++i)
printf("i = %lu\t:\t%f\n", i, dev2host[i]);
printf("\n");
}
|
228e5f7ef550fcff72d7cf6dc018849a22ad8c6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/box_decoder_and_assign_op.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void DecodeBoxKernel(const T* prior_box_data,
const T* prior_box_var_data,
const T* target_box_data,
const int roi_num,
const int class_num,
const T box_clip,
T* output_box_data) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < roi_num * class_num) {
int i = idx / class_num;
int j = idx % class_num;
T prior_box_width = prior_box_data[i * 4 + 2] - prior_box_data[i * 4] + 1;
T prior_box_height =
prior_box_data[i * 4 + 3] - prior_box_data[i * 4 + 1] + 1;
T prior_box_center_x = prior_box_data[i * 4] + prior_box_width / 2;
T prior_box_center_y = prior_box_data[i * 4 + 1] + prior_box_height / 2;
int offset = i * class_num * 4 + j * 4;
T dw = prior_box_var_data[2] * target_box_data[offset + 2];
T dh = prior_box_var_data[3] * target_box_data[offset + 3];
if (dw > box_clip) {
dw = box_clip;
}
if (dh > box_clip) {
dh = box_clip;
}
T target_box_center_x = 0, target_box_center_y = 0;
T target_box_width = 0, target_box_height = 0;
target_box_center_x =
prior_box_var_data[0] * target_box_data[offset] * prior_box_width +
prior_box_center_x;
target_box_center_y =
prior_box_var_data[1] * target_box_data[offset + 1] * prior_box_height +
prior_box_center_y;
target_box_width = expf(dw) * prior_box_width;
target_box_height = expf(dh) * prior_box_height;
output_box_data[offset] = target_box_center_x - target_box_width / 2;
output_box_data[offset + 1] = target_box_center_y - target_box_height / 2;
output_box_data[offset + 2] =
target_box_center_x + target_box_width / 2 - 1;
output_box_data[offset + 3] =
target_box_center_y + target_box_height / 2 - 1;
}
}
template <typename T>
__global__ void AssignBoxKernel(const T* prior_box_data,
const T* box_score_data,
T* output_box_data,
const int roi_num,
const int class_num,
T* output_assign_box_data) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < roi_num) {
int i = idx;
T max_score = -1;
int max_j = -1;
for (int j = 0; j < class_num; ++j) {
T score = box_score_data[i * class_num + j];
if (score > max_score && j > 0) {
max_score = score;
max_j = j;
}
}
if (max_j > 0) {
for (int pno = 0; pno < 4; pno++) {
output_assign_box_data[i * 4 + pno] =
output_box_data[i * class_num * 4 + max_j * 4 + pno];
}
} else {
for (int pno = 0; pno < 4; pno++) {
output_assign_box_data[i * 4 + pno] = prior_box_data[i * 4 + pno];
}
}
}
}
template <typename DeviceContext, typename T>
class BoxDecoderAndAssignCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* prior_box = context.Input<framework::LoDTensor>("PriorBox");
auto* prior_box_var = context.Input<phi::DenseTensor>("PriorBoxVar");
auto* target_box = context.Input<framework::LoDTensor>("TargetBox");
auto* box_score = context.Input<framework::LoDTensor>("BoxScore");
auto* output_box = context.Output<phi::DenseTensor>("DecodeBox");
auto* output_assign_box =
context.Output<phi::DenseTensor>("OutputAssignBox");
auto roi_num = target_box->dims()[0];
auto class_num = box_score->dims()[1];
auto* target_box_data = target_box->data<T>();
auto* prior_box_data = prior_box->data<T>();
auto* prior_box_var_data = prior_box_var->data<T>();
auto* box_score_data = box_score->data<T>();
output_box->mutable_data<T>({roi_num, class_num * 4}, context.GetPlace());
output_assign_box->mutable_data<T>({roi_num, 4}, context.GetPlace());
T* output_box_data = output_box->data<T>();
T* output_assign_box_data = output_assign_box->data<T>();
int block = 512;
int grid = (roi_num * class_num + block - 1) / block;
auto& device_ctx = context.cuda_device_context();
const T box_clip = static_cast<T>(context.Attr<float>("box_clip"));
hipLaunchKernelGGL(( DecodeBoxKernel<T>)
, dim3(grid), dim3(block), 0, device_ctx.stream(), prior_box_data,
prior_box_var_data,
target_box_data,
roi_num,
class_num,
box_clip,
output_box_data);
context.device_context().Wait();
int assign_grid = (roi_num + block - 1) / block;
hipLaunchKernelGGL(( AssignBoxKernel<T>), dim3(assign_grid), dim3(block), 0, device_ctx.stream(),
prior_box_data,
box_score_data,
output_box_data,
roi_num,
class_num,
output_assign_box_data);
context.device_context().Wait();
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
box_decoder_and_assign,
ops::BoxDecoderAndAssignCUDAKernel<phi::GPUContext, float>,
ops::BoxDecoderAndAssignCUDAKernel<phi::GPUContext, double>);
|
228e5f7ef550fcff72d7cf6dc018849a22ad8c6d.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/box_decoder_and_assign_op.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void DecodeBoxKernel(const T* prior_box_data,
const T* prior_box_var_data,
const T* target_box_data,
const int roi_num,
const int class_num,
const T box_clip,
T* output_box_data) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < roi_num * class_num) {
int i = idx / class_num;
int j = idx % class_num;
T prior_box_width = prior_box_data[i * 4 + 2] - prior_box_data[i * 4] + 1;
T prior_box_height =
prior_box_data[i * 4 + 3] - prior_box_data[i * 4 + 1] + 1;
T prior_box_center_x = prior_box_data[i * 4] + prior_box_width / 2;
T prior_box_center_y = prior_box_data[i * 4 + 1] + prior_box_height / 2;
int offset = i * class_num * 4 + j * 4;
T dw = prior_box_var_data[2] * target_box_data[offset + 2];
T dh = prior_box_var_data[3] * target_box_data[offset + 3];
if (dw > box_clip) {
dw = box_clip;
}
if (dh > box_clip) {
dh = box_clip;
}
T target_box_center_x = 0, target_box_center_y = 0;
T target_box_width = 0, target_box_height = 0;
target_box_center_x =
prior_box_var_data[0] * target_box_data[offset] * prior_box_width +
prior_box_center_x;
target_box_center_y =
prior_box_var_data[1] * target_box_data[offset + 1] * prior_box_height +
prior_box_center_y;
target_box_width = expf(dw) * prior_box_width;
target_box_height = expf(dh) * prior_box_height;
output_box_data[offset] = target_box_center_x - target_box_width / 2;
output_box_data[offset + 1] = target_box_center_y - target_box_height / 2;
output_box_data[offset + 2] =
target_box_center_x + target_box_width / 2 - 1;
output_box_data[offset + 3] =
target_box_center_y + target_box_height / 2 - 1;
}
}
template <typename T>
__global__ void AssignBoxKernel(const T* prior_box_data,
const T* box_score_data,
T* output_box_data,
const int roi_num,
const int class_num,
T* output_assign_box_data) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < roi_num) {
int i = idx;
T max_score = -1;
int max_j = -1;
for (int j = 0; j < class_num; ++j) {
T score = box_score_data[i * class_num + j];
if (score > max_score && j > 0) {
max_score = score;
max_j = j;
}
}
if (max_j > 0) {
for (int pno = 0; pno < 4; pno++) {
output_assign_box_data[i * 4 + pno] =
output_box_data[i * class_num * 4 + max_j * 4 + pno];
}
} else {
for (int pno = 0; pno < 4; pno++) {
output_assign_box_data[i * 4 + pno] = prior_box_data[i * 4 + pno];
}
}
}
}
template <typename DeviceContext, typename T>
class BoxDecoderAndAssignCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* prior_box = context.Input<framework::LoDTensor>("PriorBox");
auto* prior_box_var = context.Input<phi::DenseTensor>("PriorBoxVar");
auto* target_box = context.Input<framework::LoDTensor>("TargetBox");
auto* box_score = context.Input<framework::LoDTensor>("BoxScore");
auto* output_box = context.Output<phi::DenseTensor>("DecodeBox");
auto* output_assign_box =
context.Output<phi::DenseTensor>("OutputAssignBox");
auto roi_num = target_box->dims()[0];
auto class_num = box_score->dims()[1];
auto* target_box_data = target_box->data<T>();
auto* prior_box_data = prior_box->data<T>();
auto* prior_box_var_data = prior_box_var->data<T>();
auto* box_score_data = box_score->data<T>();
output_box->mutable_data<T>({roi_num, class_num * 4}, context.GetPlace());
output_assign_box->mutable_data<T>({roi_num, 4}, context.GetPlace());
T* output_box_data = output_box->data<T>();
T* output_assign_box_data = output_assign_box->data<T>();
int block = 512;
int grid = (roi_num * class_num + block - 1) / block;
auto& device_ctx = context.cuda_device_context();
const T box_clip = static_cast<T>(context.Attr<float>("box_clip"));
DecodeBoxKernel<T>
<<<grid, block, 0, device_ctx.stream()>>>(prior_box_data,
prior_box_var_data,
target_box_data,
roi_num,
class_num,
box_clip,
output_box_data);
context.device_context().Wait();
int assign_grid = (roi_num + block - 1) / block;
AssignBoxKernel<T><<<assign_grid, block, 0, device_ctx.stream()>>>(
prior_box_data,
box_score_data,
output_box_data,
roi_num,
class_num,
output_assign_box_data);
context.device_context().Wait();
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
box_decoder_and_assign,
ops::BoxDecoderAndAssignCUDAKernel<phi::GPUContext, float>,
ops::BoxDecoderAndAssignCUDAKernel<phi::GPUContext, double>);
|
0ea58d9e3157d46b07e753f66386108e9492eebd.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/matrix/kernelparams.h>
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <cmath>
#include <cuml/svm/svc.hpp>
#include <sstream>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace SVM {
template <typename D>
struct SvcParams {
DatasetParams data;
BlobsParams blobs;
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
class SVC : public BlobsFixture<D, D> {
public:
SVC(const std::string& name, const SvcParams<D>& p)
: BlobsFixture<D, D>(name, p.data, p.blobs),
kernel(p.kernel),
model(p.model),
svm_param(p.svm_param)
{
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[kernel.kernel] << p.data;
this->SetName(oss.str().c_str());
}
protected:
void runBenchmark(::benchmark::State& state) override
{
if (this->params.rowMajor) { state.SkipWithError("SVC only supports col-major inputs"); }
if (this->svm_param.svmType != ML::SVM::C_SVC) {
state.SkipWithError("SVC currently only supports C_SVC");
}
this->loopOnState(state, [this]() {
ML::SVM::svcFit(*this->handle,
this->data.X,
this->params.nrows,
this->params.ncols,
this->data.y,
this->svm_param,
this->kernel,
this->model);
CUDA_CHECK(hipStreamSynchronize(this->stream));
ML::SVM::svmFreeBuffers(*this->handle, this->model);
});
}
private:
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
std::vector<SvcParams<D>> getInputs()
{
struct Triplets {
int nrows, ncols, nclasses;
};
std::vector<SvcParams<D>> out;
SvcParams<D> p;
p.data.rowMajor = false;
p.blobs.cluster_std = 1.0;
p.blobs.shuffle = false;
p.blobs.center_box_min = -2.0;
p.blobs.center_box_max = 2.0;
p.blobs.seed = 12345ULL;
// svmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity})
p.svm_param = ML::SVM::svmParameter{1, 200, 100, 100, 1e-3, CUML_LEVEL_INFO, 0, ML::SVM::C_SVC};
p.model = ML::SVM::svmModel<D>{0, 0, 0, nullptr, nullptr, nullptr, 0, nullptr};
std::vector<Triplets> rowcols = {{50000, 2, 2}, {2048, 100000, 2}, {50000, 1000, 2}};
std::vector<MLCommon::Matrix::KernelParams> kernels{
MLCommon::Matrix::KernelParams{MLCommon::Matrix::LINEAR, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::POLYNOMIAL, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::RBF, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::TANH, 3, 0.1, 0}};
for (auto& rc : rowcols) {
p.data.nrows = rc.nrows;
p.data.ncols = rc.ncols;
p.data.nclasses = rc.nclasses;
// Limit the number of iterations for large tests
p.svm_param.max_iter = (rc.nrows > 10000) ? 20 : 100;
for (auto kernel : kernels) {
p.kernel = kernel;
p.kernel.gamma = 1.0 / rc.ncols;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(SvcParams<float>, SVC<float>, "blobs", getInputs<float>());
ML_BENCH_REGISTER(SvcParams<double>, SVC<double>, "blobs", getInputs<double>());
} // namespace SVM
} // namespace Bench
} // end namespace ML
|
0ea58d9e3157d46b07e753f66386108e9492eebd.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/matrix/kernelparams.h>
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <cmath>
#include <cuml/svm/svc.hpp>
#include <sstream>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace SVM {
template <typename D>
struct SvcParams {
DatasetParams data;
BlobsParams blobs;
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
class SVC : public BlobsFixture<D, D> {
public:
SVC(const std::string& name, const SvcParams<D>& p)
: BlobsFixture<D, D>(name, p.data, p.blobs),
kernel(p.kernel),
model(p.model),
svm_param(p.svm_param)
{
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[kernel.kernel] << p.data;
this->SetName(oss.str().c_str());
}
protected:
void runBenchmark(::benchmark::State& state) override
{
if (this->params.rowMajor) { state.SkipWithError("SVC only supports col-major inputs"); }
if (this->svm_param.svmType != ML::SVM::C_SVC) {
state.SkipWithError("SVC currently only supports C_SVC");
}
this->loopOnState(state, [this]() {
ML::SVM::svcFit(*this->handle,
this->data.X,
this->params.nrows,
this->params.ncols,
this->data.y,
this->svm_param,
this->kernel,
this->model);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
ML::SVM::svmFreeBuffers(*this->handle, this->model);
});
}
private:
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
std::vector<SvcParams<D>> getInputs()
{
struct Triplets {
int nrows, ncols, nclasses;
};
std::vector<SvcParams<D>> out;
SvcParams<D> p;
p.data.rowMajor = false;
p.blobs.cluster_std = 1.0;
p.blobs.shuffle = false;
p.blobs.center_box_min = -2.0;
p.blobs.center_box_max = 2.0;
p.blobs.seed = 12345ULL;
// svmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity})
p.svm_param = ML::SVM::svmParameter{1, 200, 100, 100, 1e-3, CUML_LEVEL_INFO, 0, ML::SVM::C_SVC};
p.model = ML::SVM::svmModel<D>{0, 0, 0, nullptr, nullptr, nullptr, 0, nullptr};
std::vector<Triplets> rowcols = {{50000, 2, 2}, {2048, 100000, 2}, {50000, 1000, 2}};
std::vector<MLCommon::Matrix::KernelParams> kernels{
MLCommon::Matrix::KernelParams{MLCommon::Matrix::LINEAR, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::POLYNOMIAL, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::RBF, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::TANH, 3, 0.1, 0}};
for (auto& rc : rowcols) {
p.data.nrows = rc.nrows;
p.data.ncols = rc.ncols;
p.data.nclasses = rc.nclasses;
// Limit the number of iterations for large tests
p.svm_param.max_iter = (rc.nrows > 10000) ? 20 : 100;
for (auto kernel : kernels) {
p.kernel = kernel;
p.kernel.gamma = 1.0 / rc.ncols;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(SvcParams<float>, SVC<float>, "blobs", getInputs<float>());
ML_BENCH_REGISTER(SvcParams<double>, SVC<double>, "blobs", getInputs<double>());
} // namespace SVM
} // namespace Bench
} // end namespace ML
|
30622d11a3e8409eb31ad6c1ee86d8fdd51d0903.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define N 1500
#define TILE_SIZE 4
#define MILI 1000
#define NANO 1000000000
void checkCudaError(hipError_t errorCode)
{
if (errorCode != hipSuccess)
{
fprintf(stderr, "Error %d\n", errorCode);
exit(1);
}
}
float** createSquareMatOnHost(int size)
{
int i;
float **mat;
mat = (float **) malloc(size * sizeof(float *));
if (!mat)
{
fprintf(stderr, "error allocating row memory");
exit(1);
}
mat[0] = (float *) malloc(size * size * sizeof(float));
if (!mat[0])
{
fprintf(stderr, "error allocating col memory");
exit(1);
}
for (i = 1; i < size; i++)
mat[i] = mat[i-1] + size;
return mat;
}
void freeSquareMatOnHost(float **mat)
{
free(mat[0]);
free(mat);
}
void printSquareMat(float **mat, int size)
{
int i, j;
for (i = 0; i < size; i++, printf("\n"))
for (j = 0; j < size; j++)
printf(" %f", mat[i][j]);
}
void multiplySquareMatOnHost(float **C, float **A, float **B, int size)
{
int i, j, k;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
float sum = 0.0;
for (k = 0; k < size; k++)
sum += A[i][k] * B[k][j];
C[i][j] = sum;
}
}
__global__ void multiplySquareSerializedMatOnDevice(float *C, float *A, float *B, int size)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i < size && j < size)
{
int k;
float sum = 0.0;
for (k = 0; k < size; k++)
sum += A[i*size+k] * B[k*size+j];
C[i*size+j] = sum;
}
}
long long convertToNsec(struct timespec ts)
{
long long tmp = (long long) ts.tv_sec*NANO + ts.tv_nsec;
return tmp;
}
int main(void)
{
float **ha, **hb, **hc, **hd; // host data
float *da, *db, *dc; // device data
int i, j;
int nbytes = N * N * sizeof(float);
// allocate memory in host
ha = createSquareMatOnHost(N);
hb = createSquareMatOnHost(N);
hc = createSquareMatOnHost(N);
hd = createSquareMatOnHost(N);
// allocate memory in device
checkCudaError(hipMalloc((void **) &da, nbytes));
checkCudaError(hipMalloc((void **) &db, nbytes));
checkCudaError(hipMalloc((void **) &dc, nbytes));
// set values in ha randomly
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
ha[i][j] = rand() % 10;
// set values in hb randomly
srand(time(NULL));
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
hb[i][j] = rand() % 10;
// copy from host to device
checkCudaError(hipMemcpy(da, ha[0], nbytes, hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(db, hb[0], nbytes, hipMemcpyHostToDevice));
// multiply matrix on host
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
multiplySquareMatOnHost(hd, ha, hb, N);
clock_gettime(CLOCK_MONOTONIC, &ts_end);
// compute elapsed time
long long hostElapsedTime = convertToNsec(ts_end) - convertToNsec(ts_start);
printf("CPU time: %lf\n", (double) hostElapsedTime / NANO);
// multiply matrix on device
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
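// one block per TILE_SIZE x TILE_SIZE tile, rounded up; with N=1500 and TILE_SIZE=4 this gives exactly 375 blocks per dimension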
int gridSize = (N/TILE_SIZE) + (N%TILE_SIZE>0?1:0);
dim3 grid(gridSize, gridSize), block(TILE_SIZE, TILE_SIZE);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( multiplySquareSerializedMatOnDevice), dim3(grid), dim3(block), 0, 0, dc, da, db, N);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// compute elapsed time
float deviceElapsedTime;
hipEventElapsedTime(&deviceElapsedTime, start, stop);
printf("CUDA time: %f\n", deviceElapsedTime / MILI);
// copy from device to host
checkCudaError(hipMemcpy(hc[0], dc, nbytes, hipMemcpyDeviceToHost));
// assertion
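// exact float comparison is safe here: all inputs are small integers, so every product and partial sum is exactly representable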
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
assert(hc[i][j] == hd[i][j]);
// free memory
freeSquareMatOnHost(ha);
freeSquareMatOnHost(hb);
freeSquareMatOnHost(hc);
freeSquareMatOnHost(hd);
hipFree(da);
hipFree(db);
hipFree(dc);
}
|
30622d11a3e8409eb31ad6c1ee86d8fdd51d0903.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <cuda.h>
#define N 1500
#define TILE_SIZE 4
#define MILI 1000
#define NANO 1000000000
void checkCudaError(cudaError_t errorCode)
{
if (errorCode != cudaSuccess)
{
fprintf(stderr, "Error %d\n", errorCode);
exit(1);
}
}
float** createSquareMatOnHost(int size)
{
int i;
float **mat;
mat = (float **) malloc(size * sizeof(float *));
if (!mat)
{
fprintf(stderr, "error allocating row memory");
exit(1);
}
mat[0] = (float *) malloc(size * size * sizeof(float));
if (!mat[0])
{
fprintf(stderr, "error allocating col memory");
exit(1);
}
for (i = 1; i < size; i++)
mat[i] = mat[i-1] + size;
return mat;
}
void freeSquareMatOnHost(float **mat)
{
free(mat[0]);
free(mat);
}
void printSquareMat(float **mat, int size)
{
int i, j;
for (i = 0; i < size; i++, printf("\n"))
for (j = 0; j < size; j++)
printf(" %f", mat[i][j]);
}
void multiplySquareMatOnHost(float **C, float **A, float **B, int size)
{
int i, j, k;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
float sum = 0.0;
for (k = 0; k < size; k++)
sum += A[i][k] * B[k][j];
C[i][j] = sum;
}
}
__global__ void multiplySquareSerializedMatOnDevice(float *C, float *A, float *B, int size)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i < size && j < size)
{
int k;
float sum = 0.0;
for (k = 0; k < size; k++)
sum += A[i*size+k] * B[k*size+j];
C[i*size+j] = sum;
}
}
long long convertToNsec(struct timespec ts)
{
long long tmp = (long long) ts.tv_sec*NANO + ts.tv_nsec;
return tmp;
}
int main(void)
{
float **ha, **hb, **hc, **hd; // host data
float *da, *db, *dc; // device data
int i, j;
int nbytes = N * N * sizeof(float);
// allocate memory in host
ha = createSquareMatOnHost(N);
hb = createSquareMatOnHost(N);
hc = createSquareMatOnHost(N);
hd = createSquareMatOnHost(N);
// allocate memory in device
checkCudaError(cudaMalloc((void **) &da, nbytes));
checkCudaError(cudaMalloc((void **) &db, nbytes));
checkCudaError(cudaMalloc((void **) &dc, nbytes));
// set values in ha randomly
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
ha[i][j] = rand() % 10;
// set values in hb randomly
srand(time(NULL));
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
hb[i][j] = rand() % 10;
// copy from host to device
checkCudaError(cudaMemcpy(da, ha[0], nbytes, cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(db, hb[0], nbytes, cudaMemcpyHostToDevice));
// multiply matrix on host
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
multiplySquareMatOnHost(hd, ha, hb, N);
clock_gettime(CLOCK_MONOTONIC, &ts_end);
// compute elapsed time
long long hostElapsedTime = convertToNsec(ts_end) - convertToNsec(ts_start);
printf("CPU time: %lf\n", (double) hostElapsedTime / NANO);
// multiply matrix on device
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int gridSize = (N/TILE_SIZE) + (N%TILE_SIZE>0?1:0);
dim3 grid(gridSize, gridSize), block(TILE_SIZE, TILE_SIZE);
cudaEventRecord(start, 0);
multiplySquareSerializedMatOnDevice<<<grid, block>>>(dc, da, db, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// compute elapsed time
float deviceElapsedTime;
cudaEventElapsedTime(&deviceElapsedTime, start, stop);
printf("CUDA time: %f\n", deviceElapsedTime / MILI);
// copy from device to host
checkCudaError(cudaMemcpy(hc[0], dc, nbytes, cudaMemcpyDeviceToHost));
// assertion
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
assert(hc[i][j] == hd[i][j]);
// free memory
freeSquareMatOnHost(ha);
freeSquareMatOnHost(hb);
freeSquareMatOnHost(hc);
freeSquareMatOnHost(hd);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
}
|
04a45b24116d019256265361f4922f3e780c7f95.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_l1;
int xdim0_update_halo_kernel1_l1_h = -1;
__constant__ int ydim0_update_halo_kernel1_l1;
int ydim0_update_halo_kernel1_l1_h = -1;
__constant__ int xdim1_update_halo_kernel1_l1;
int xdim1_update_halo_kernel1_l1_h = -1;
__constant__ int ydim1_update_halo_kernel1_l1;
int ydim1_update_halo_kernel1_l1_h = -1;
__constant__ int xdim2_update_halo_kernel1_l1;
int xdim2_update_halo_kernel1_l1_h = -1;
__constant__ int ydim2_update_halo_kernel1_l1;
int ydim2_update_halo_kernel1_l1_h = -1;
__constant__ int xdim3_update_halo_kernel1_l1;
int xdim3_update_halo_kernel1_l1_h = -1;
__constant__ int ydim3_update_halo_kernel1_l1;
int ydim3_update_halo_kernel1_l1_h = -1;
__constant__ int xdim4_update_halo_kernel1_l1;
int xdim4_update_halo_kernel1_l1_h = -1;
__constant__ int ydim4_update_halo_kernel1_l1;
int ydim4_update_halo_kernel1_l1_h = -1;
__constant__ int xdim5_update_halo_kernel1_l1;
int xdim5_update_halo_kernel1_l1_h = -1;
__constant__ int ydim5_update_halo_kernel1_l1;
int ydim5_update_halo_kernel1_l1_h = -1;
__constant__ int xdim6_update_halo_kernel1_l1;
int xdim6_update_halo_kernel1_l1_h = -1;
__constant__ int ydim6_update_halo_kernel1_l1;
int ydim6_update_halo_kernel1_l1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_l1*(y)+xdim0_update_halo_kernel1_l1*ydim0_update_halo_kernel1_l1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_l1*(y)+xdim1_update_halo_kernel1_l1*ydim1_update_halo_kernel1_l1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_l1*(y)+xdim2_update_halo_kernel1_l1*ydim2_update_halo_kernel1_l1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_l1*(y)+xdim3_update_halo_kernel1_l1*ydim3_update_halo_kernel1_l1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_l1*(y)+xdim4_update_halo_kernel1_l1*ydim4_update_halo_kernel1_l1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_l1*(y)+xdim5_update_halo_kernel1_l1*ydim5_update_halo_kernel1_l1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_l1*(y)+xdim6_update_halo_kernel1_l1*ydim6_update_halo_kernel1_l1*(z))
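// OPS_ACCn(x,y,z) flattens a relative 3-D offset into a 1-D index into dat n: x + xdim_n*(y) + xdim_n*ydim_n*(z)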
//user function
__device__
inline void update_halo_kernel1_l1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
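// the l1 variant fills each selected field from its neighbour one cell over in +x: field(0,0,0) = field(1,0,0)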
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(1,0,0)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(1,0,0)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(1,0,0)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(1,0,0)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(1,0,0)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(1,0,0)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(1,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_update_halo_kernel1_l1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_l1 + idx_z * 1*1 * xdim0_update_halo_kernel1_l1 * ydim0_update_halo_kernel1_l1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_l1 + idx_z * 1*1 * xdim1_update_halo_kernel1_l1 * ydim1_update_halo_kernel1_l1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_l1 + idx_z * 1*1 * xdim2_update_halo_kernel1_l1 * ydim2_update_halo_kernel1_l1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_l1 + idx_z * 1*1 * xdim3_update_halo_kernel1_l1 * ydim3_update_halo_kernel1_l1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_l1 + idx_z * 1*1 * xdim4_update_halo_kernel1_l1 * ydim4_update_halo_kernel1_l1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_l1 + idx_z * 1*1 * xdim5_update_halo_kernel1_l1 * ydim5_update_halo_kernel1_l1;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_l1 + idx_z * 1*1 * xdim6_update_halo_kernel1_l1 * ydim6_update_halo_kernel1_l1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_l1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_l1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,17)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_l1");
OPS_kernels[17].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_l1_h || ydim0 != ydim0_update_halo_kernel1_l1_h || xdim1 != xdim1_update_halo_kernel1_l1_h || ydim1 != ydim1_update_halo_kernel1_l1_h || xdim2 != xdim2_update_halo_kernel1_l1_h || ydim2 != ydim2_update_halo_kernel1_l1_h || xdim3 != xdim3_update_halo_kernel1_l1_h || ydim3 != ydim3_update_halo_kernel1_l1_h || xdim4 != xdim4_update_halo_kernel1_l1_h || ydim4 != ydim4_update_halo_kernel1_l1_h || xdim5 != xdim5_update_halo_kernel1_l1_h || ydim5 != ydim5_update_halo_kernel1_l1_h || xdim6 != xdim6_update_halo_kernel1_l1_h || ydim6 != ydim6_update_halo_kernel1_l1_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel1_l1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_l1_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel1_l1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_l1_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_l1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_l1_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel1_l1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_l1_h = ydim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_l1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_l1_h = xdim2;
hipMemcpyToSymbol( ydim2_update_halo_kernel1_l1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_l1_h = ydim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_l1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_l1_h = xdim3;
hipMemcpyToSymbol( ydim3_update_halo_kernel1_l1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_l1_h = ydim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_l1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_l1_h = xdim4;
hipMemcpyToSymbol( ydim4_update_halo_kernel1_l1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_l1_h = ydim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_l1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_l1_h = xdim5;
hipMemcpyToSymbol( ydim5_update_halo_kernel1_l1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_l1_h = ydim5;
hipMemcpyToSymbol( xdim6_update_halo_kernel1_l1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_l1_h = xdim6;
hipMemcpyToSymbol( ydim6_update_halo_kernel1_l1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_l1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_l1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[17].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 17;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 17;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_l1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_l1");
}
ops_enqueue_kernel(desc);
}
#endif
|
04a45b24116d019256265361f4922f3e780c7f95.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_l1;
int xdim0_update_halo_kernel1_l1_h = -1;
__constant__ int ydim0_update_halo_kernel1_l1;
int ydim0_update_halo_kernel1_l1_h = -1;
__constant__ int xdim1_update_halo_kernel1_l1;
int xdim1_update_halo_kernel1_l1_h = -1;
__constant__ int ydim1_update_halo_kernel1_l1;
int ydim1_update_halo_kernel1_l1_h = -1;
__constant__ int xdim2_update_halo_kernel1_l1;
int xdim2_update_halo_kernel1_l1_h = -1;
__constant__ int ydim2_update_halo_kernel1_l1;
int ydim2_update_halo_kernel1_l1_h = -1;
__constant__ int xdim3_update_halo_kernel1_l1;
int xdim3_update_halo_kernel1_l1_h = -1;
__constant__ int ydim3_update_halo_kernel1_l1;
int ydim3_update_halo_kernel1_l1_h = -1;
__constant__ int xdim4_update_halo_kernel1_l1;
int xdim4_update_halo_kernel1_l1_h = -1;
__constant__ int ydim4_update_halo_kernel1_l1;
int ydim4_update_halo_kernel1_l1_h = -1;
__constant__ int xdim5_update_halo_kernel1_l1;
int xdim5_update_halo_kernel1_l1_h = -1;
__constant__ int ydim5_update_halo_kernel1_l1;
int ydim5_update_halo_kernel1_l1_h = -1;
__constant__ int xdim6_update_halo_kernel1_l1;
int xdim6_update_halo_kernel1_l1_h = -1;
__constant__ int ydim6_update_halo_kernel1_l1;
int ydim6_update_halo_kernel1_l1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_l1*(y)+xdim0_update_halo_kernel1_l1*ydim0_update_halo_kernel1_l1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_l1*(y)+xdim1_update_halo_kernel1_l1*ydim1_update_halo_kernel1_l1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_l1*(y)+xdim2_update_halo_kernel1_l1*ydim2_update_halo_kernel1_l1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_l1*(y)+xdim3_update_halo_kernel1_l1*ydim3_update_halo_kernel1_l1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_l1*(y)+xdim4_update_halo_kernel1_l1*ydim4_update_halo_kernel1_l1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_l1*(y)+xdim5_update_halo_kernel1_l1*ydim5_update_halo_kernel1_l1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_l1*(y)+xdim6_update_halo_kernel1_l1*ydim6_update_halo_kernel1_l1*(z))
//user function
__device__
inline void update_halo_kernel1_l1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(1,0,0)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(1,0,0)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(1,0,0)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(1,0,0)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(1,0,0)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(1,0,0)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(1,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_update_halo_kernel1_l1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_l1 + idx_z * 1*1 * xdim0_update_halo_kernel1_l1 * ydim0_update_halo_kernel1_l1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_l1 + idx_z * 1*1 * xdim1_update_halo_kernel1_l1 * ydim1_update_halo_kernel1_l1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_l1 + idx_z * 1*1 * xdim2_update_halo_kernel1_l1 * ydim2_update_halo_kernel1_l1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_l1 + idx_z * 1*1 * xdim3_update_halo_kernel1_l1 * ydim3_update_halo_kernel1_l1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_l1 + idx_z * 1*1 * xdim4_update_halo_kernel1_l1 * ydim4_update_halo_kernel1_l1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_l1 + idx_z * 1*1 * xdim5_update_halo_kernel1_l1 * ydim5_update_halo_kernel1_l1;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_l1 + idx_z * 1*1 * xdim6_update_halo_kernel1_l1 * ydim6_update_halo_kernel1_l1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_l1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_l1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,17)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_l1");
OPS_kernels[17].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_l1_h || ydim0 != ydim0_update_halo_kernel1_l1_h || xdim1 != xdim1_update_halo_kernel1_l1_h || ydim1 != ydim1_update_halo_kernel1_l1_h || xdim2 != xdim2_update_halo_kernel1_l1_h || ydim2 != ydim2_update_halo_kernel1_l1_h || xdim3 != xdim3_update_halo_kernel1_l1_h || ydim3 != ydim3_update_halo_kernel1_l1_h || xdim4 != xdim4_update_halo_kernel1_l1_h || ydim4 != ydim4_update_halo_kernel1_l1_h || xdim5 != xdim5_update_halo_kernel1_l1_h || ydim5 != ydim5_update_halo_kernel1_l1_h || xdim6 != xdim6_update_halo_kernel1_l1_h || ydim6 != ydim6_update_halo_kernel1_l1_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_l1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_l1_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel1_l1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_l1_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_l1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_l1_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel1_l1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_l1_h = ydim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_l1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_l1_h = xdim2;
cudaMemcpyToSymbol( ydim2_update_halo_kernel1_l1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_l1_h = ydim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_l1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_l1_h = xdim3;
cudaMemcpyToSymbol( ydim3_update_halo_kernel1_l1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_l1_h = ydim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_l1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_l1_h = xdim4;
cudaMemcpyToSymbol( ydim4_update_halo_kernel1_l1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_l1_h = ydim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_l1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_l1_h = xdim5;
cudaMemcpyToSymbol( ydim5_update_halo_kernel1_l1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_l1_h = ydim5;
cudaMemcpyToSymbol( xdim6_update_halo_kernel1_l1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_l1_h = xdim6;
cudaMemcpyToSymbol( ydim6_update_halo_kernel1_l1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_l1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_l1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[17].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 17;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 17;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_l1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_l1");
}
ops_enqueue_kernel(desc);
}
#endif
|
eaaf0679068e85140ef67e3365574e69ca25839f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/AccumulateType.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHAtomics.cuh>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
/* This code computes the sum of the weights in two steps:
1) Each GPU warp sums `NROWS_PER_THREAD` of the rows given by `indices`
2) The partial sums from 1) are summed and scattered into `grad_weight`
Note that `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
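// For example, a segment covering 35 duplicate indices is split into ceil_div(35, NROWS_PER_THREAD) = 4
// partial segments: three summing 10 rows each and one summing the remaining 5.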
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
template <typename index_t>
__global__
void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
template <typename index_t>
__global__
void krn_partial_segment_offset(
index_t *ret,
const index_t *partials_per_segment,
const index_t *partials_per_segment_offset,
const index_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
index_t idx = partials_per_segment_offset[id];
const index_t num_partials = partials_per_segment[id];
const index_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight_bags(
index_t *indices, scalar_t *gradOutput,
index_t *offset2bag, index_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const index_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
index_t* segment_offsets, int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
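// one thread per (segment, feature) pair; stride_warped pads the feature range to a warp multiple so a warp never straddles two segments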
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight(
index_t *indices,
scalar_t *gradOutput,
index_t *count,
ptrdiff_t numel,
int64_t stride,
index_t* segment_offsets,
int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const index_t target_row = indices[idx];
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t, typename index_t>
__global__ void sum_and_scatter(
index_t *input, scalar_t *gradWeight, int64_t stride,
index_t* segment_offsets, int64_t num_of_segments,
const acc_type<scalar_t, true> *grad_weight_per_segment,
const index_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t padding_idx,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
int64_t target_row = input[segment_offsets[id]];
if (target_row != padding_idx) {
gradWeight[target_row * stride + startFeature] = weight;
}
}
} // anon namespace
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () {
auto segment_offsets = at::empty({numel}, orig_indices.options());
int64_t num_of_segments;
{
auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>());
auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>());
auto ends = thrust::unique_by_key_copy(
policy,
sorted_indices_dev,
sorted_indices_dev + numel,
thrust::make_counting_iterator(0),
dummy_dev,
thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>()));
num_of_segments = thrust::get<0>(ends) - dummy_dev;
}
// We split the segments up into sizes of `NROWS_PER_THREAD`
// Compute the number of partial-segments per segment (some partial-segments
// may not span the full `NROWS_PER_THREAD` rows)
auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options());
{
hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
partials_per_segment.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments,
numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options());
thrust::exclusive_scan(
policy,
thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()),
thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments),
thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>()));
// The total number of partial-segments is the sum of `partials_per_segment`,
// i.e. the last entry of the exclusive scan plus the last per-segment count
const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() +
partials_per_segment_offset[num_of_segments-1].item<index_t>();
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options());
{
hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
partial_segment_offset.data_ptr<index_t>(),
partials_per_segment.data_ptr<index_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE;
const int block = ::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
// For numerical stability, the dtype of `grad_weight_per_segment`
// should match `acc_type`
using partial_weight_t = acc_type<scalar_t, true>;
TensorOptions op;
if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) {
op = grad.options().dtype(at::kFloat);
} else {
op = grad.options();
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride,
mode_mean, bag_size.data_ptr<index_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr,
numel, stride,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream,
sorted_indices.data_ptr<index_t>(),
grad_weight.data_ptr<scalar_t>(),
stride,
segment_offsets.data_ptr<index_t>(),
num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
padding_idx,
stride_warped);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
}}
|
eaaf0679068e85140ef67e3365574e69ca25839f.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/AccumulateType.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCAtomics.cuh>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
/* This code computes the sum of the weights in two steps:
1) Each GPU warp sums `NROWS_PER_THREAD` of the rows given by `indices`
2) The partial sums from 1) are summed and scattered into `grad_weight`
Note that `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
template <typename index_t>
__global__
void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
template <typename index_t>
__global__
void krn_partial_segment_offset(
index_t *ret,
const index_t *partials_per_segment,
const index_t *partials_per_segment_offset,
const index_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
index_t idx = partials_per_segment_offset[id];
const index_t num_partials = partials_per_segment[id];
const index_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight_bags(
index_t *indices, scalar_t *gradOutput,
index_t *offset2bag, index_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const index_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
index_t* segment_offsets, int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight(
index_t *indices,
scalar_t *gradOutput,
index_t *count,
ptrdiff_t numel,
int64_t stride,
index_t* segment_offsets,
int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const index_t target_row = indices[idx];
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t, typename index_t>
__global__ void sum_and_scatter(
index_t *input, scalar_t *gradWeight, int64_t stride,
index_t* segment_offsets, int64_t num_of_segments,
const acc_type<scalar_t, true> *grad_weight_per_segment,
const index_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t padding_idx,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
int64_t target_row = input[segment_offsets[id]];
if (target_row != padding_idx) {
gradWeight[target_row * stride + startFeature] = weight;
}
}
} // anon namespace
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
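// Illustrative example (hypothetical values): for sorted_indices = [1,1,1,4,4,7],
// unique_by_key_copy yields segment_offsets = [0,3,5] and num_of_segments == 3.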
AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () {
auto segment_offsets = at::empty({numel}, orig_indices.options());
int64_t num_of_segments;
{
auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>());
auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>());
auto ends = thrust::unique_by_key_copy(
policy,
sorted_indices_dev,
sorted_indices_dev + numel,
thrust::make_counting_iterator(0),
dummy_dev,
thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>()));
num_of_segments = thrust::get<0>(ends) - dummy_dev;
}
// We split the segments up into sizes of `NROWS_PER_THREAD`
  // Compute the number of partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
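  // e.g. (illustrative) a segment covering 3*NROWS_PER_THREAD + 1 rows is split into
  // 4 partial segments: three full ones plus one holding the single leftover row.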
auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options());
{
krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
partials_per_segment.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments,
numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options());
thrust::exclusive_scan(
policy,
thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()),
thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments),
thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>()));
  // The total number of partial-segments is the sum of `partials_per_segment`
  // (equivalently, the last exclusive-scan offset plus the last per-segment count)
const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() +
partials_per_segment_offset[num_of_segments-1].item<index_t>();
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options());
{
krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
partial_segment_offset.data_ptr<index_t>(),
partials_per_segment.data_ptr<index_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE;
const int block = std::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
// For numerical stability, the dtype of `grad_weight_per_segment`
// should match `acc_type`
using partial_weight_t = acc_type<scalar_t, true>;
TensorOptions op;
if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) {
op = grad.options().dtype(at::kFloat);
} else {
op = grad.options();
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride,
mode_mean, bag_size.data_ptr<index_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr,
numel, stride,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>(
sorted_indices.data_ptr<index_t>(),
grad_weight.data_ptr<scalar_t>(),
stride,
segment_offsets.data_ptr<index_t>(),
num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
padding_idx,
stride_warped);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
}}
|
48073538798fde72c707b32431a78f93e45a0ab3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
__global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){
//this kernel handles up to 4096 points
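	//auction-style assignment between the two point sets: unassigned points in xyz1 bid
	//for their cheapest (distance + price) point in xyz2; each winning bid raises that
	//point's price by (second best - best + tolerance) until every point is matched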
const int NMax=4096;
__shared__ short Queue[NMax];
__shared__ short matchrbuf[NMax];
__shared__ float pricer[NMax];
__shared__ float bests[32][3];
__shared__ int qhead,qlen;
const int BufLen=2048;
__shared__ float buf[BufLen];
for (int bno=blockIdx.x;bno<b;bno+=gridDim.x){
int cnt=0;
float tolerance=1e-4;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchl[bno*n+j]=-1;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchrbuf[j]=-1;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
Queue[j]=j;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
pricer[j]=0;
const int Block=512;
for (int k0=0;k0<n;k0+=Block){
int k1=min(n,k0+Block);
for (int k=threadIdx.x;k<(k1-k0)*3;k+=blockDim.x)
buf[k]=xyz1[bno*n*3+k0*3+k];
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x2=xyz2[bno*n*3+j*3+0];
float y2=xyz2[bno*n*3+j*3+1];
float z2=xyz2[bno*n*3+j*3+2];
for (int k=k0;k<k1;k++){
float x1=buf[(k-k0)*3+0];
float y1=buf[(k-k0)*3+1];
float z1=buf[(k-k0)*3+2];
float d=sqrtf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2));
cost[blockIdx.x*n*n+k*n+j]=d;
}
}
__syncthreads();
}
		//calculate the distance: cost[k*n+j] = ||xyz1[k] - xyz2[j]|| has now been filled in
if (threadIdx.x==0){
qhead=0;
qlen=n;
}
__syncthreads();
int loaded=0;
float value9,value10,value11,value12,value13,value14,value15,value16;
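		//main bidding loop: pop the next unassigned point i, find its best and second-best
		//priced candidate with a block-wide reduction, then thread 0 commits the bid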
while (qlen){
int i=Queue[qhead];
int i2;
if (qhead+1<n)
i2=Queue[qhead+1];
else
i2=Queue[0];
float best=1e38f,best2=1e38f;
int bestj=0;
if (n==blockDim.x*8){
int j=threadIdx.x;
float value1,value2,value3,value4,value5,value6,value7,value8;
if (loaded){
value1=value9+pricer[j];
value2=value10+pricer[j+blockDim.x];
value3=value11+pricer[j+blockDim.x*2];
value4=value12+pricer[j+blockDim.x*3];
value5=value13+pricer[j+blockDim.x*4];
value6=value14+pricer[j+blockDim.x*5];
value7=value15+pricer[j+blockDim.x*6];
value8=value16+pricer[j+blockDim.x*7];
loaded=0;
}else{
value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
value5=cost[blockIdx.x*n*n+i*n+j+blockDim.x*4]+pricer[j+blockDim.x*4];
value6=cost[blockIdx.x*n*n+i*n+j+blockDim.x*5]+pricer[j+blockDim.x*5];
value7=cost[blockIdx.x*n*n+i*n+j+blockDim.x*6]+pricer[j+blockDim.x*6];
value8=cost[blockIdx.x*n*n+i*n+j+blockDim.x*7]+pricer[j+blockDim.x*7];
value9=cost[blockIdx.x*n*n+i2*n+j];
value10=cost[blockIdx.x*n*n+i2*n+j+blockDim.x];
value11=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*2];
value12=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*3];
value13=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*4];
value14=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*5];
value15=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*6];
value16=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*7];
loaded=qlen>1;
}
int vj,vj2,vj3,vj4;
if (value1<value2){
vj=j;
}else{
vj=j+blockDim.x;
float t=value1;
value1=value2;
value2=t;
}
if (value3<value4){
vj2=j+blockDim.x*2;
}else{
vj2=j+blockDim.x*3;
float t=value3;
value3=value4;
value4=t;
}
if (value5<value6){
vj3=j+blockDim.x*4;
}else{
vj3=j+blockDim.x*5;
float t=value5;
value5=value6;
value6=t;
}
if (value7<value8){
vj4=j+blockDim.x*6;
}else{
vj4=j+blockDim.x*7;
float t=value7;
value7=value8;
value8=t;
}
if (value1<value3){
value2=fminf(value2,value3);
}else{
value2=fminf(value1,value4);
value1=value3;
vj=vj2;
}
if (value5<value7){
value6=fminf(value6,value7);
}else{
value6=fminf(value5,value8);
value5=value7;
vj3=vj4;
}
if (value1<value5){
best=value1;
bestj=vj;
best2=fminf(value2,value5);
}else{
best2=fminf(value1,value6);
best=value5;
bestj=vj3;
}
}else if (n>=blockDim.x*4){
for (int j=threadIdx.x;j<n;j+=blockDim.x*4){
float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
float value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
float value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
int vj,vj2;
if (value1<value2){
vj=j;
}else{
vj=j+blockDim.x;
float t=value1;
value1=value2;
value2=t;
}
if (value3<value4){
vj2=j+blockDim.x*2;
}else{
vj2=j+blockDim.x*3;
float t=value3;
value3=value4;
value4=t;
}
if (value1<value3){
value2=fminf(value2,value3);
}else{
value2=fminf(value1,value4);
value1=value3;
vj=vj2;
}
if (best<value1){
best2=fminf(best2,value1);
}else{
best2=fminf(best,value2);
best=value1;
bestj=vj;
}
}
}else if (n>=blockDim.x*2){
for (int j=threadIdx.x;j<n;j+=blockDim.x*2){
float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
int vj;
if (value1<value2){
vj=j;
}else{
vj=j+blockDim.x;
float t=value1;
value1=value2;
value2=t;
}
if (best<value1){
best2=fminf(best2,value1);
}else{
best2=fminf(best,value2);
best=value1;
bestj=vj;
}
}
}else{
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float value=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
if (best<value){
best2=fminf(best2,value);
}else{
best2=best;
bestj=j;
best=value;
}
}
}
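			//warp-level reduction of (best, best2, bestj) via shuffles, then across warps below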
for (int i=16;i>0;i>>=1){
float b1=__shfl_down(best,i,32);
float b2=__shfl_down(best2,i,32);
int bj=__shfl_down(bestj,i,32);
if (best<b1){
best2=fminf(b1,best2);
}else{
best=b1;
best2=fminf(best,b2);
bestj=bj;
}
}
if ((threadIdx.x&31)==0){
bests[threadIdx.x>>5][0]=best;
bests[threadIdx.x>>5][1]=best2;
*(int*)&bests[threadIdx.x>>5][2]=bestj;
}
__syncthreads();
int nn=blockDim.x>>5;
if (threadIdx.x<nn){
best=bests[threadIdx.x][0];
best2=bests[threadIdx.x][1];
bestj=*(int*)&bests[threadIdx.x][2];
for (int i=nn>>1;i>0;i>>=1){
float b1=__shfl_down(best,i,32);
float b2=__shfl_down(best2,i,32);
int bj=__shfl_down(bestj,i,32);
if (best<b1){
best2=fminf(b1,best2);
}else{
best=b1;
best2=fminf(best,b2);
bestj=bj;
}
}
}
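			//thread 0 commits the winning bid: raise the price, dequeue i, requeue whichever
			//point previously held bestj (if any); tolerance is relaxed if progress stalls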
if (threadIdx.x==0){
float delta=best2-best+tolerance;
qhead++;
qlen--;
if (qhead>=n)
qhead-=n;
int old=matchrbuf[bestj];
pricer[bestj]+=delta;
cnt++;
if (old!=-1){
int ql=qlen;
int tail=qhead+ql;
qlen=ql+1;
if (tail>=n)
tail-=n;
Queue[tail]=old;
}
if (cnt==(40*n)){
if (tolerance==1.0)
qlen=0;
tolerance=fminf(1.0,tolerance*100);
cnt=0;
}
}
__syncthreads();
if (threadIdx.x==0){
matchrbuf[bestj]=i;
}
}
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchr[bno*n+j]=matchrbuf[j];
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchl[bno*n+matchrbuf[j]]=j;
__syncthreads();
}
}
void AuctionMatchLauncher(int b,int n,const float * xyz1,const float * xyz2,int * matchl,int * matchr,float * cost){
hipLaunchKernelGGL(( AuctionMatchKernel), dim3(32),dim3(512), 0, 0, b,n,xyz1,xyz2,matchl,matchr,cost);
}
|
48073538798fde72c707b32431a78f93e45a0ab3.cu
|
#include <cstdio>
__global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){
//this kernel handles up to 4096 points
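	//auction-style assignment between the two point sets: unassigned points in xyz1 bid
	//for their cheapest (distance + price) point in xyz2; each winning bid raises that
	//point's price by (second best - best + tolerance) until every point is matched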
const int NMax=4096;
__shared__ short Queue[NMax];
__shared__ short matchrbuf[NMax];
__shared__ float pricer[NMax];
__shared__ float bests[32][3];
__shared__ int qhead,qlen;
const int BufLen=2048;
__shared__ float buf[BufLen];
for (int bno=blockIdx.x;bno<b;bno+=gridDim.x){
int cnt=0;
float tolerance=1e-4;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchl[bno*n+j]=-1;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchrbuf[j]=-1;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
Queue[j]=j;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
pricer[j]=0;
const int Block=512;
for (int k0=0;k0<n;k0+=Block){
int k1=min(n,k0+Block);
for (int k=threadIdx.x;k<(k1-k0)*3;k+=blockDim.x)
buf[k]=xyz1[bno*n*3+k0*3+k];
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x2=xyz2[bno*n*3+j*3+0];
float y2=xyz2[bno*n*3+j*3+1];
float z2=xyz2[bno*n*3+j*3+2];
for (int k=k0;k<k1;k++){
float x1=buf[(k-k0)*3+0];
float y1=buf[(k-k0)*3+1];
float z1=buf[(k-k0)*3+2];
float d=sqrtf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2));
cost[blockIdx.x*n*n+k*n+j]=d;
}
}
__syncthreads();
}
		//calculate the distance: cost[k*n+j] = ||xyz1[k] - xyz2[j]|| has now been filled in
if (threadIdx.x==0){
qhead=0;
qlen=n;
}
__syncthreads();
int loaded=0;
float value9,value10,value11,value12,value13,value14,value15,value16;
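		//main bidding loop: pop the next unassigned point i, find its best and second-best
		//priced candidate with a block-wide reduction, then thread 0 commits the bid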
while (qlen){
int i=Queue[qhead];
int i2;
if (qhead+1<n)
i2=Queue[qhead+1];
else
i2=Queue[0];
float best=1e38f,best2=1e38f;
int bestj=0;
if (n==blockDim.x*8){
int j=threadIdx.x;
float value1,value2,value3,value4,value5,value6,value7,value8;
if (loaded){
value1=value9+pricer[j];
value2=value10+pricer[j+blockDim.x];
value3=value11+pricer[j+blockDim.x*2];
value4=value12+pricer[j+blockDim.x*3];
value5=value13+pricer[j+blockDim.x*4];
value6=value14+pricer[j+blockDim.x*5];
value7=value15+pricer[j+blockDim.x*6];
value8=value16+pricer[j+blockDim.x*7];
loaded=0;
}else{
value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
value5=cost[blockIdx.x*n*n+i*n+j+blockDim.x*4]+pricer[j+blockDim.x*4];
value6=cost[blockIdx.x*n*n+i*n+j+blockDim.x*5]+pricer[j+blockDim.x*5];
value7=cost[blockIdx.x*n*n+i*n+j+blockDim.x*6]+pricer[j+blockDim.x*6];
value8=cost[blockIdx.x*n*n+i*n+j+blockDim.x*7]+pricer[j+blockDim.x*7];
value9=cost[blockIdx.x*n*n+i2*n+j];
value10=cost[blockIdx.x*n*n+i2*n+j+blockDim.x];
value11=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*2];
value12=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*3];
value13=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*4];
value14=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*5];
value15=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*6];
value16=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*7];
loaded=qlen>1;
}
int vj,vj2,vj3,vj4;
if (value1<value2){
vj=j;
}else{
vj=j+blockDim.x;
float t=value1;
value1=value2;
value2=t;
}
if (value3<value4){
vj2=j+blockDim.x*2;
}else{
vj2=j+blockDim.x*3;
float t=value3;
value3=value4;
value4=t;
}
if (value5<value6){
vj3=j+blockDim.x*4;
}else{
vj3=j+blockDim.x*5;
float t=value5;
value5=value6;
value6=t;
}
if (value7<value8){
vj4=j+blockDim.x*6;
}else{
vj4=j+blockDim.x*7;
float t=value7;
value7=value8;
value8=t;
}
if (value1<value3){
value2=fminf(value2,value3);
}else{
value2=fminf(value1,value4);
value1=value3;
vj=vj2;
}
if (value5<value7){
value6=fminf(value6,value7);
}else{
value6=fminf(value5,value8);
value5=value7;
vj3=vj4;
}
if (value1<value5){
best=value1;
bestj=vj;
best2=fminf(value2,value5);
}else{
best2=fminf(value1,value6);
best=value5;
bestj=vj3;
}
}else if (n>=blockDim.x*4){
for (int j=threadIdx.x;j<n;j+=blockDim.x*4){
float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
float value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
float value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
int vj,vj2;
if (value1<value2){
vj=j;
}else{
vj=j+blockDim.x;
float t=value1;
value1=value2;
value2=t;
}
if (value3<value4){
vj2=j+blockDim.x*2;
}else{
vj2=j+blockDim.x*3;
float t=value3;
value3=value4;
value4=t;
}
if (value1<value3){
value2=fminf(value2,value3);
}else{
value2=fminf(value1,value4);
value1=value3;
vj=vj2;
}
if (best<value1){
best2=fminf(best2,value1);
}else{
best2=fminf(best,value2);
best=value1;
bestj=vj;
}
}
}else if (n>=blockDim.x*2){
for (int j=threadIdx.x;j<n;j+=blockDim.x*2){
float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
int vj;
if (value1<value2){
vj=j;
}else{
vj=j+blockDim.x;
float t=value1;
value1=value2;
value2=t;
}
if (best<value1){
best2=fminf(best2,value1);
}else{
best2=fminf(best,value2);
best=value1;
bestj=vj;
}
}
}else{
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float value=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
if (best<value){
best2=fminf(best2,value);
}else{
best2=best;
bestj=j;
best=value;
}
}
}
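			//warp-level reduction of (best, best2, bestj) via shuffles, then across warps below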
for (int i=16;i>0;i>>=1){
float b1=__shfl_down(best,i,32);
float b2=__shfl_down(best2,i,32);
int bj=__shfl_down(bestj,i,32);
if (best<b1){
best2=fminf(b1,best2);
}else{
best=b1;
best2=fminf(best,b2);
bestj=bj;
}
}
if ((threadIdx.x&31)==0){
bests[threadIdx.x>>5][0]=best;
bests[threadIdx.x>>5][1]=best2;
*(int*)&bests[threadIdx.x>>5][2]=bestj;
}
__syncthreads();
int nn=blockDim.x>>5;
if (threadIdx.x<nn){
best=bests[threadIdx.x][0];
best2=bests[threadIdx.x][1];
bestj=*(int*)&bests[threadIdx.x][2];
for (int i=nn>>1;i>0;i>>=1){
float b1=__shfl_down(best,i,32);
float b2=__shfl_down(best2,i,32);
int bj=__shfl_down(bestj,i,32);
if (best<b1){
best2=fminf(b1,best2);
}else{
best=b1;
best2=fminf(best,b2);
bestj=bj;
}
}
}
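			//thread 0 commits the winning bid: raise the price, dequeue i, requeue whichever
			//point previously held bestj (if any); tolerance is relaxed if progress stalls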
if (threadIdx.x==0){
float delta=best2-best+tolerance;
qhead++;
qlen--;
if (qhead>=n)
qhead-=n;
int old=matchrbuf[bestj];
pricer[bestj]+=delta;
cnt++;
if (old!=-1){
int ql=qlen;
int tail=qhead+ql;
qlen=ql+1;
if (tail>=n)
tail-=n;
Queue[tail]=old;
}
if (cnt==(40*n)){
if (tolerance==1.0)
qlen=0;
tolerance=fminf(1.0,tolerance*100);
cnt=0;
}
}
__syncthreads();
if (threadIdx.x==0){
matchrbuf[bestj]=i;
}
}
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchr[bno*n+j]=matchrbuf[j];
for (int j=threadIdx.x;j<n;j+=blockDim.x)
matchl[bno*n+matchrbuf[j]]=j;
__syncthreads();
}
}
void AuctionMatchLauncher(int b,int n,const float * xyz1,const float * xyz2,int * matchl,int * matchr,float * cost){
AuctionMatchKernel<<<32,512>>>(b,n,xyz1,xyz2,matchl,matchr,cost);
}
|
cb5730903afcce0e2d7a93fef6d9aa01cbf7f8ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/LinearAlgebra.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
namespace at { namespace native {
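// Returns a tensor whose layout the gemm backend can consume directly: if either stride
// is 1 the tensor is used as-is and transpose_tensor records the orientation, otherwise
// a contiguous clone is made.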
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
IntArrayRef tensor_sizes = tensor.sizes();
if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
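// Batched variant: additionally reports the leading dimension (ld_tensor) that the
// batched gemm call should use for this operand.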
Tensor prepare_batch_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor, int64_t& ld_tensor, bool transpose_result, int64_t m, int64_t n) {
IntArrayRef tensor_strides = tensor.strides();
Tensor tensor_;
int fast_dim = transpose_result ? 2 : 1;
int leading_dim = transpose_result ? 1 : 2;
if (tensor_strides[fast_dim] == 1 &&
(tensor_strides[leading_dim] >= std::max<int64_t>(1, m))) {
transpose_tensor = false;
tensor_ = tensor;
ld_tensor = tensor_strides[leading_dim];
} else if ((tensor_strides[leading_dim] == 1) &&
(tensor_strides[fast_dim] >= std::max<int64_t>(1, n))) {
transpose_tensor = true;
tensor_ = tensor;
ld_tensor = tensor_strides[fast_dim];
} else {
transpose_tensor = !transpose_result;
if (tensor.is_contiguous()) {
tensor_ = tensor;
} else {
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
ld_tensor = tensor_.stride(1);
}
return tensor_;
}
namespace {
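// result = beta * self + alpha * (mat1 @ mat2), computed with a single blas::gemm call;
// operands are massaged into gemm-friendly layouts by the helpers above.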
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
checkAllSameGPU("addmm", args);
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.toComplexDouble() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
if (mat1.numel() == 0) {
// By definition, when beta==0, values in self should be ignored. nans and infs
// should not propagate
if (beta.toComplexDouble() == 0.) {
return result.zero_();
}
return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
Tensor& baddbmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
TORCH_CHECK(self.dim() == 3, "self must be a 3D tensor");
TORCH_CHECK(batch1.dim() == 3, "batch1 must be a 3D tensor");
TORCH_CHECK(batch2.dim() == 3, "batch2 must be a 3D tensor");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {batch1, "batch1", 2}, {batch2, "batch2", 3}};
checkAllSameGPU("baddbmm", args);
IntArrayRef batch1_sizes = batch1.sizes();
IntArrayRef batch2_sizes = batch2.sizes();
IntArrayRef self_sizes = self.sizes();
TORCH_CHECK(self_sizes[0] == batch1_sizes[0], "self dim 0 must match batch1 dim 0");
TORCH_CHECK(self_sizes[0] == batch2_sizes[0], "self dim 0 must match batch2 dim 0");
TORCH_CHECK(self_sizes[1] == batch1_sizes[1], "self dim 1 must match batch1 dim 1");
TORCH_CHECK(self_sizes[2] == batch2_sizes[2], "self dim 2 must match batch2 dim 2");
TORCH_CHECK(batch1_sizes[2] == batch2_sizes[1], "batch1 dim 2 must match batch2 dim 1");
if (!result.is_same(self)) {
result.resize_as_(self);
if (beta.to<c10::complex<double>>() != 0.0) {
result.copy_(self);
}
}
bool transpose_result = false;
Tensor result_;
IntArrayRef result_strides = result.strides();
IntArrayRef result_sizes = result.sizes();
if ((result_strides[1] == 1) &&
((result_sizes[2] == 1) || (result_strides[2] >= std::max<int64_t>(1, result_sizes[1])))) {
result_ = result;
} else if ((result_strides[2] == 1) &&
(result_sizes[1] == 1 || (result_strides[1] >= std::max<int64_t>(1, result_sizes[2])))) {
transpose_result = true;
result_ = result;
} else {
result_ = result.transpose(1, 2).clone(at::MemoryFormat::Contiguous);
result_ = result_.transpose(1, 2);
}
int leading_dim = transpose_result ? 1 : 2;
Tensor batch1_ = transpose_result ? batch2 : batch1;
Tensor batch2_ = transpose_result ? batch1 : batch2;
int64_t m = result_sizes[transpose_result ? 2 : 1];
int64_t n = result_sizes[leading_dim];
int64_t k = batch1_.size(leading_dim);
int64_t lda, ldb, ldc;
bool transpose_batch1, transpose_batch2;
batch1_ = prepare_batch_matrix_for_cublas(batch1_, transpose_batch1, lda, transpose_result, m, k);
batch2_ = prepare_batch_matrix_for_cublas(batch2_, transpose_batch2, ldb, transpose_result, k, n);
ldc = result_.stride(leading_dim);
int64_t num_batches = result_.size(0);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "baddbmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* batch1_ptr = batch1_.data_ptr<scalar_t>();
scalar_t* batch2_ptr = batch2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::bgemm<scalar_t>(
transpose_batch1 ? 't' : 'n',
transpose_batch2 ? 't' : 'n',
m, n, k,
alpha_val,
batch1_ptr, lda, batch1_.stride(0),
batch2_ptr, ldb, batch2_.stride(0),
beta_val,
result_ptr, ldc, result_.stride(0),
num_batches
);
});
if (!result.is_same(result_)) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
} else {
self_ = self;
}
{
at::NoNamesGuard guard;
baddbmm_out_cuda_impl(result, self_, batch1, batch2, beta, alpha);
}
namedinference::propagate_names_if_nonempty(
result,
namedinference::compute_baddbmm_outnames(result, batch1, batch2, self));
return result;
}
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
return baddbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
Scalar beta(0.0);
Scalar alpha(1.0);
{
NoNamesGuard guard;
baddbmm_out_cuda_impl(result, result, batch1, batch2, beta, alpha);
}
namedinference::propagate_names_if_nonempty(
result,
namedinference::compute_bmm_outnames(result, batch1, batch2));
return result;
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
namespace {
inline void dot_check(const Tensor& self, const Tensor& other) {
TORCH_CHECK(
self.dim() == 1 && other.dim() == 1,
"1D tensors expected, but got ",
self.dim(),
"D and ",
other.dim(),
"D tensors");
TORCH_CHECK(
self.scalar_type() == other.scalar_type(),
"dot : expected both vectors to have same dtype, but found ",
self.scalar_type(),
" and ",
other.scalar_type());
TORCH_CHECK(
self.numel() == other.numel(),
"inconsistent tensor size, expected tensor [",
self.numel(),
"] and src [",
other.numel(),
"] to have the same number of elements, but got ",
self.numel(),
" and ",
other.numel(),
" elements respectively");
TORCH_CHECK(
self.device() == other.device(),
"expected all tensors to be on the same device. Found: ",
self.device(),
", ",
other.device());
TORCH_CHECK(
(self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
(other.stride(0) <= INT_MAX),
"dot only supports n, incx, incy with the bound [val] <= %d",
INT_MAX);
}
} // anonymous namespace
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(handle, HIPBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::dot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
Tensor vdot_cuda(const Tensor& self, const Tensor& other) {
if (!self.is_complex()) {
return dot_cuda(self, other);
}
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(
handle, HIPBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::vdot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
namespace {
void addr_kernel_cuda(TensorIterator &iter, Scalar beta, Scalar alpha) {
if (iter.dtype() == ScalarType::Bool) {
using scalar_t = bool;
auto beta_val = beta.to<scalar_t>();
auto alpha_val = alpha.to<scalar_t>();
// when beta is false, values in self should be ignored,
// nans and infs in self should not propagate.
if (beta_val == false) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return alpha_val && vec1_val && vec2_val;
}
);
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val);
}
);
}
return;
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf,
iter.dtype(), "addr_cuda", [&] {
auto beta_val = beta.to<scalar_t>();
auto alpha_val = alpha.to<scalar_t>();
scalar_t zero_val(0);
// when beta==0, values in self should be ignored,
// nans and infs in self should not propagate.
if (beta_val == zero_val) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return alpha_val * vec1_val * vec2_val;
}
);
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return beta_val * self_val + alpha_val * vec1_val * vec2_val;
}
);
}
});
}
} // anonymous namespace
REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda);
}}
|
cb5730903afcce0e2d7a93fef6d9aa01cbf7f8ac.cu
|
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/LinearAlgebra.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at { namespace native {
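// Returns a tensor whose layout the gemm backend can consume directly: if either stride
// is 1 the tensor is used as-is and transpose_tensor records the orientation, otherwise
// a contiguous clone is made.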
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
IntArrayRef tensor_sizes = tensor.sizes();
if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
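// Batched variant: additionally reports the leading dimension (ld_tensor) that the
// batched gemm call should use for this operand.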
Tensor prepare_batch_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor, int64_t& ld_tensor, bool transpose_result, int64_t m, int64_t n) {
IntArrayRef tensor_strides = tensor.strides();
Tensor tensor_;
int fast_dim = transpose_result ? 2 : 1;
int leading_dim = transpose_result ? 1 : 2;
if (tensor_strides[fast_dim] == 1 &&
(tensor_strides[leading_dim] >= std::max<int64_t>(1, m))) {
transpose_tensor = false;
tensor_ = tensor;
ld_tensor = tensor_strides[leading_dim];
} else if ((tensor_strides[leading_dim] == 1) &&
(tensor_strides[fast_dim] >= std::max<int64_t>(1, n))) {
transpose_tensor = true;
tensor_ = tensor;
ld_tensor = tensor_strides[fast_dim];
} else {
transpose_tensor = !transpose_result;
if (tensor.is_contiguous()) {
tensor_ = tensor;
} else {
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
ld_tensor = tensor_.stride(1);
}
return tensor_;
}
namespace {
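// result = beta * self + alpha * (mat1 @ mat2), computed with a single blas::gemm call;
// operands are massaged into gemm-friendly layouts by the helpers above.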
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
checkAllSameGPU("addmm", args);
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.toComplexDouble() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
if (mat1.numel() == 0) {
// By definition, when beta==0, values in self should be ignored. nans and infs
// should not propagate
if (beta.toComplexDouble() == 0.) {
return result.zero_();
}
return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
Tensor& baddbmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
TORCH_CHECK(self.dim() == 3, "self must be a 3D tensor");
TORCH_CHECK(batch1.dim() == 3, "batch1 must be a 3D tensor");
TORCH_CHECK(batch2.dim() == 3, "batch2 must be a 3D tensor");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {batch1, "batch1", 2}, {batch2, "batch2", 3}};
checkAllSameGPU("baddbmm", args);
IntArrayRef batch1_sizes = batch1.sizes();
IntArrayRef batch2_sizes = batch2.sizes();
IntArrayRef self_sizes = self.sizes();
TORCH_CHECK(self_sizes[0] == batch1_sizes[0], "self dim 0 must match batch1 dim 0");
TORCH_CHECK(self_sizes[0] == batch2_sizes[0], "self dim 0 must match batch2 dim 0");
TORCH_CHECK(self_sizes[1] == batch1_sizes[1], "self dim 1 must match batch1 dim 1");
TORCH_CHECK(self_sizes[2] == batch2_sizes[2], "self dim 2 must match batch2 dim 2");
TORCH_CHECK(batch1_sizes[2] == batch2_sizes[1], "batch1 dim 2 must match batch2 dim 1");
if (!result.is_same(self)) {
result.resize_as_(self);
if (beta.to<c10::complex<double>>() != 0.0) {
result.copy_(self);
}
}
bool transpose_result = false;
Tensor result_;
IntArrayRef result_strides = result.strides();
IntArrayRef result_sizes = result.sizes();
if ((result_strides[1] == 1) &&
((result_sizes[2] == 1) || (result_strides[2] >= std::max<int64_t>(1, result_sizes[1])))) {
result_ = result;
} else if ((result_strides[2] == 1) &&
(result_sizes[1] == 1 || (result_strides[1] >= std::max<int64_t>(1, result_sizes[2])))) {
transpose_result = true;
result_ = result;
} else {
result_ = result.transpose(1, 2).clone(at::MemoryFormat::Contiguous);
result_ = result_.transpose(1, 2);
}
int leading_dim = transpose_result ? 1 : 2;
Tensor batch1_ = transpose_result ? batch2 : batch1;
Tensor batch2_ = transpose_result ? batch1 : batch2;
int64_t m = result_sizes[transpose_result ? 2 : 1];
int64_t n = result_sizes[leading_dim];
int64_t k = batch1_.size(leading_dim);
int64_t lda, ldb, ldc;
bool transpose_batch1, transpose_batch2;
batch1_ = prepare_batch_matrix_for_cublas(batch1_, transpose_batch1, lda, transpose_result, m, k);
batch2_ = prepare_batch_matrix_for_cublas(batch2_, transpose_batch2, ldb, transpose_result, k, n);
ldc = result_.stride(leading_dim);
int64_t num_batches = result_.size(0);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "baddbmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* batch1_ptr = batch1_.data_ptr<scalar_t>();
scalar_t* batch2_ptr = batch2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::bgemm<scalar_t>(
transpose_batch1 ? 't' : 'n',
transpose_batch2 ? 't' : 'n',
m, n, k,
alpha_val,
batch1_ptr, lda, batch1_.stride(0),
batch2_ptr, ldb, batch2_.stride(0),
beta_val,
result_ptr, ldc, result_.stride(0),
num_batches
);
});
if (!result.is_same(result_)) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
} else {
self_ = self;
}
{
at::NoNamesGuard guard;
baddbmm_out_cuda_impl(result, self_, batch1, batch2, beta, alpha);
}
namedinference::propagate_names_if_nonempty(
result,
namedinference::compute_baddbmm_outnames(result, batch1, batch2, self));
return result;
}
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
return baddbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
Scalar beta(0.0);
Scalar alpha(1.0);
{
NoNamesGuard guard;
baddbmm_out_cuda_impl(result, result, batch1, batch2, beta, alpha);
}
namedinference::propagate_names_if_nonempty(
result,
namedinference::compute_bmm_outnames(result, batch1, batch2));
return result;
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
namespace {
inline void dot_check(const Tensor& self, const Tensor& other) {
TORCH_CHECK(
self.dim() == 1 && other.dim() == 1,
"1D tensors expected, but got ",
self.dim(),
"D and ",
other.dim(),
"D tensors");
TORCH_CHECK(
self.scalar_type() == other.scalar_type(),
"dot : expected both vectors to have same dtype, but found ",
self.scalar_type(),
" and ",
other.scalar_type());
TORCH_CHECK(
self.numel() == other.numel(),
"inconsistent tensor size, expected tensor [",
self.numel(),
"] and src [",
other.numel(),
"] to have the same number of elements, but got ",
self.numel(),
" and ",
other.numel(),
" elements respectively");
TORCH_CHECK(
self.device() == other.device(),
"expected all tensors to be on the same device. Found: ",
self.device(),
", ",
other.device());
TORCH_CHECK(
(self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
(other.stride(0) <= INT_MAX),
"dot only supports n, incx, incy with the bound [val] <= %d",
INT_MAX);
}
} // anonymous namespace
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(handle, CUBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::dot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
Tensor vdot_cuda(const Tensor& self, const Tensor& other) {
if (!self.is_complex()) {
return dot_cuda(self, other);
}
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(
handle, CUBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::vdot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
namespace {
void addr_kernel_cuda(TensorIterator &iter, Scalar beta, Scalar alpha) {
if (iter.dtype() == ScalarType::Bool) {
using scalar_t = bool;
auto beta_val = beta.to<scalar_t>();
auto alpha_val = alpha.to<scalar_t>();
// when beta is false, values in self should be ignored,
// nans and infs in self should not propagate.
if (beta_val == false) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return alpha_val && vec1_val && vec2_val;
}
);
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val);
}
);
}
return;
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf,
iter.dtype(), "addr_cuda", [&] {
auto beta_val = beta.to<scalar_t>();
auto alpha_val = alpha.to<scalar_t>();
scalar_t zero_val(0);
// when beta==0, values in self should be ignored,
// nans and infs in self should not propagate.
if (beta_val == zero_val) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return alpha_val * vec1_val * vec2_val;
}
);
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (scalar_t self_val,
scalar_t vec1_val, scalar_t vec2_val) -> scalar_t {
return beta_val * self_val + alpha_val * vec1_val * vec2_val;
}
);
}
});
}
} // anonymous namespace
REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda);
}}
|
b5fb236952b638388522cd61e7970aebe9527f72.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "BigFloat.cu"
#include "hip.hip"
|
b5fb236952b638388522cd61e7970aebe9527f72.cu
|
#include "BigFloat.cu"
#include "cuda.cu"
|
76333a0250a055bb5b0e60509d8d3ed38fa8a272.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include "hip/hip_runtime.h"
#include "cudaTools.h"
#include "Chronos.h"
double launchKernelMemoryTransfert ( const void* memory, const size_t size ) {
Chronos chrono;
void* ptrDevMemory;
chrono.start ();
HANDLE_ERROR( hipMalloc( &ptrDevMemory, size ) );
HANDLE_ERROR( hipMemcpy ( ptrDevMemory, memory, size, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipDeviceSynchronize () );
HANDLE_ERROR( hipFree ( ptrDevMemory ) );
return chrono.timeFlight ();
}
|
76333a0250a055bb5b0e60509d8d3ed38fa8a272.cu
|
#include <stdint.h>
#include "cuda_runtime.h"
#include "cudaTools.h"
#include "Chronos.h"
double launchKernelMemoryTransfert ( const void* memory, const size_t size ) {
Chronos chrono;
void* ptrDevMemory;
chrono.start ();
HANDLE_ERROR( cudaMalloc( &ptrDevMemory, size ) );
HANDLE_ERROR( cudaMemcpy ( ptrDevMemory, memory, size, cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaDeviceSynchronize () );
HANDLE_ERROR( cudaFree ( ptrDevMemory ) );
return chrono.timeFlight ();
}
|
22067bf802f1c031f58cdfde75eca4d331cefe8f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "parameters.cuh"
#include <cstring>
point *pos_colloid, *pos_fl, *vel_colloid, *vel_fl, *ang_vel_colloid, *f, *ra, *old_force, len = point(30, 30, 30), *cell_vel, **rot, *dump_vel_fl, **u, **vc, **om;
int n = 10, niter = 21000, file = 0, nbin = 300, maxpart = 100, no_of_colloid = 10, nbox, **nbr, **up_nbr, *cnt, *up_cnt, *fluid_no, *iv, *seed, *iy, **dp;
int no_of_fluid = len.prod()*10, *no_neigh, **neigh_fl, **neighbour, *n_neighbour, **box_neigh, **box_part, **cell_part, nn, ran_c = 0, *idum;
double kbt = 1, kbt1 = 1, ndt = 0.1, dv = 0.1, mass_fl = 1.0, mass_colloid = 654.1, sig_colloid = 5.0, eps = 1.0, v0 = 0;
double dt = ndt/(double)n, sigma = 0.80*sig_colloid, I_colloid = 0.1*mass_colloid*sigma*sigma, *potential_colloid, *rana, *ranb;
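// Allocates every simulation array in unified (managed) memory so the host-side setup
// code and the device kernels can share the same buffers without explicit copies.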
void initialize() {
point **ppointers[] = {&pos_fl, &vel_fl, &f, &pos_colloid, &vel_colloid, &ang_vel_colloid, &old_force, &ra};
int **ipointers[] = {&fluid_no, &n_neighbour, &no_neigh, &cnt, &up_cnt};
int isize[] = {(int)len.prod(), no_of_colloid };
int psize[] = {no_of_fluid, no_of_colloid};
hipMallocManaged(&box_part, (len.prod() + 2)*sizeof(int *));
hipMallocManaged(&cell_part, (len.prod() + 2)*sizeof(int *));
hipMallocManaged(&box_neigh, sizeof(int *)*512);
hipMallocManaged(&neighbour, sizeof(int *)*256);
hipMallocManaged(&neigh_fl, sizeof(int *)*(no_of_colloid + 2));
hipMallocManaged(&dp, sizeof(int *)*(no_of_colloid + 2));
hipMallocManaged(&nbr, sizeof(int *)*7005);
hipMallocManaged(&up_nbr, sizeof(int *)*7005);
hipMallocManaged(&u, (no_of_colloid + 2)*sizeof(point *));
hipMallocManaged(&rot, (len.prod() + 2)*sizeof(point *));
hipMallocManaged(&cell_vel, (len.prod() + 2)*sizeof(point));
hipMallocManaged(&dump_vel_fl, (no_of_fluid + 2)*sizeof(point));
hipMallocManaged(&iv, sizeof(int)*64);
hipMallocManaged(&seed, sizeof(int));
hipMallocManaged(&idum, sizeof(int));
hipMallocManaged(&iy, sizeof(int));
hipMallocManaged(&potential_colloid, sizeof(double));
hipMallocManaged(&vc, (no_of_colloid + 2)*sizeof(point *));
hipMallocManaged(&om, (no_of_colloid + 2)*sizeof(point *));
hipMallocManaged(&rana, sizeof(double)*(len.prod() + 2));
hipMallocManaged(&ranb, sizeof(double)*(len.prod() + 2));
*seed = 77777, *idum = 123456789, *iy = 0;
for(int i = 0; i < 64; i++) iv[i] = 0;
for(int i = 0; i < 8; i++) {
if(i < 5) hipMallocManaged(ipointers[i], (isize[i>0] + 2)*sizeof(int));
hipMallocManaged(ppointers[i], (psize[i>1] + 2)*sizeof(point));
}
for(int i = 0; i <= len.prod(); i++) {
if(i <= 500) hipMallocManaged(&box_neigh[i], sizeof(int)*(len.prod() + 2));
if(i <= 200) hipMallocManaged(&neighbour[i], sizeof(int)*(no_of_colloid + 2));
if(i <= 7000) hipMallocManaged(&nbr[i], sizeof(int)*(no_of_colloid + 2));
if(i <= 7000) hipMallocManaged(&up_nbr[i], sizeof(int)*(no_of_colloid + 2));
if(i <= no_of_colloid) hipMallocManaged(&neigh_fl[i], sizeof(int)*(10000 + 2));
if(i <= no_of_colloid) hipMallocManaged(&vc[i], sizeof(point)*(10000 + 2));
if(i <= no_of_colloid) hipMallocManaged(&u[i], sizeof(point)*(10000 + 2));
if(i <= no_of_colloid) hipMallocManaged(&om[i], sizeof(point)*(10000 + 2));
if(i <= no_of_colloid) hipMallocManaged(&dp[i], sizeof(int)*(512)); //512 = nbox
hipMallocManaged(&box_part[i], sizeof(int)*(maxpart + 2));
hipMallocManaged(&cell_part[i], sizeof(int)*(maxpart + 2));
hipMallocManaged(&rot[i], sizeof(point)*4);
}
}
void initialize_colloid() {
int counter = 0, check, nofp = 0;
double space_limit = 1.3*sig_colloid, ang_vscale_colloid = sqrt(12.0*kbt1/I_colloid), vscale_colloid = sqrt(12.0*kbt1/mass_colloid);
point avr_vel = point(0, 0, 0), t, temp, iter = point(4, 4, 4), lim = len - point(1, 1, 1);
for(int i = 0; i <= lim.prod(); i += 5, iter.next(lim, point(5, 5, 5), point(4, 4, 4)), nofp++) {
if(nofp < no_of_colloid) pos_colloid[++nofp] = iter;
else break;
}
while(counter < no_of_colloid) {
t = t.random(iv, seed, idum, iy)*len;
check = 1;
for(int j = 1; j <= counter; j++) {
temp = img(t - pos_colloid[j], len);
check = (sqrt((temp*temp).sum()) < space_limit)? 0: check;
}
if(check)
pos_colloid[++counter] = t;
}
for(int j = 1; j <= no_of_colloid; j++) {
vel_colloid[j] = (vel_colloid[j].random(iv, seed, idum, iy) - point(0.5, 0.5, 0.5))*vscale_colloid;
avr_vel += vel_colloid[j];
}
avr_vel = avr_vel/no_of_colloid;
for(int j = 1; j <= no_of_colloid; j++) {
vel_colloid[j] = vel_colloid[j] - avr_vel;
ang_vel_colloid[j] = (t.random(iv, seed, idum, iy) - point(0.5, 0.5, 0.5))*ang_vscale_colloid;
}
}
void initialize_fluid() {
int counter = 0, check;
double vscale_fluid = sqrt(12.0*kbt/mass_fl);
point avr_vel = point(0, 0, 0), t, temp;
while(counter < no_of_fluid) {
t = t.random(iv, seed, idum, iy)*len;
check = 1;
for(int j = 1; j <= no_of_colloid; j++) {
temp = img(t - pos_colloid[j], len);
check = (sqrt((temp*temp).sum()) < sigma*0.5)? 0: check;
}
if(check)
pos_fl[++counter] = t;
}
for(int j = 1; j <= no_of_fluid; j++) {
vel_fl[j] = (vel_fl[j].random(iv, seed, idum, iy) - point(0.5, 0.5, 0.5))*vscale_fluid;
avr_vel += vel_fl[j];
}
avr_vel = avr_vel/no_of_fluid;
for(int j = 1; j <= no_of_fluid; j++)
vel_fl[j] = vel_fl[j] - avr_vel;
}
|
22067bf802f1c031f58cdfde75eca4d331cefe8f.cu
|
#include "parameters.cuh"
#include <cstring>
point *pos_colloid, *pos_fl, *vel_colloid, *vel_fl, *ang_vel_colloid, *f, *ra, *old_force, len = point(30, 30, 30), *cell_vel, **rot, *dump_vel_fl, **u, **vc, **om;
int n = 10, niter = 21000, file = 0, nbin = 300, maxpart = 100, no_of_colloid = 10, nbox, **nbr, **up_nbr, *cnt, *up_cnt, *fluid_no, *iv, *seed, *iy, **dp;
int no_of_fluid = len.prod()*10, *no_neigh, **neigh_fl, **neighbour, *n_neighbour, **box_neigh, **box_part, **cell_part, nn, ran_c = 0, *idum;
double kbt = 1, kbt1 = 1, ndt = 0.1, dv = 0.1, mass_fl = 1.0, mass_colloid = 654.1, sig_colloid = 5.0, eps = 1.0, v0 = 0;
double dt = ndt/(double)n, sigma = 0.80*sig_colloid, I_colloid = 0.1*mass_colloid*sigma*sigma, *potential_colloid, *rana, *ranb;
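// Allocates every simulation array in unified (managed) memory so the host-side setup
// code and the device kernels can share the same buffers without explicit copies.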
void initialize() {
point **ppointers[] = {&pos_fl, &vel_fl, &f, &pos_colloid, &vel_colloid, &ang_vel_colloid, &old_force, &ra};
int **ipointers[] = {&fluid_no, &n_neighbour, &no_neigh, &cnt, &up_cnt};
int isize[] = {(int)len.prod(), no_of_colloid };
int psize[] = {no_of_fluid, no_of_colloid};
cudaMallocManaged(&box_part, (len.prod() + 2)*sizeof(int *));
cudaMallocManaged(&cell_part, (len.prod() + 2)*sizeof(int *));
cudaMallocManaged(&box_neigh, sizeof(int *)*512);
cudaMallocManaged(&neighbour, sizeof(int *)*256);
cudaMallocManaged(&neigh_fl, sizeof(int *)*(no_of_colloid + 2));
cudaMallocManaged(&dp, sizeof(int *)*(no_of_colloid + 2));
cudaMallocManaged(&nbr, sizeof(int *)*7005);
cudaMallocManaged(&up_nbr, sizeof(int *)*7005);
cudaMallocManaged(&u, (no_of_colloid + 2)*sizeof(point *));
cudaMallocManaged(&rot, (len.prod() + 2)*sizeof(point *));
cudaMallocManaged(&cell_vel, (len.prod() + 2)*sizeof(point));
cudaMallocManaged(&dump_vel_fl, (no_of_fluid + 2)*sizeof(point));
cudaMallocManaged(&iv, sizeof(int)*64);
cudaMallocManaged(&seed, sizeof(int));
cudaMallocManaged(&idum, sizeof(int));
cudaMallocManaged(&iy, sizeof(int));
cudaMallocManaged(&potential_colloid, sizeof(double));
cudaMallocManaged(&vc, (no_of_colloid + 2)*sizeof(point *));
cudaMallocManaged(&om, (no_of_colloid + 2)*sizeof(point *));
cudaMallocManaged(&rana, sizeof(double)*(len.prod() + 2));
cudaMallocManaged(&ranb, sizeof(double)*(len.prod() + 2));
*seed = 77777, *idum = 123456789, *iy = 0;
for(int i = 0; i < 64; i++) iv[i] = 0;
for(int i = 0; i < 8; i++) {
if(i < 5) cudaMallocManaged(ipointers[i], (isize[i>0] + 2)*sizeof(int));
cudaMallocManaged(ppointers[i], (psize[i>1] + 2)*sizeof(point));
}
for(int i = 0; i <= len.prod(); i++) {
if(i <= 500) cudaMallocManaged(&box_neigh[i], sizeof(int)*(len.prod() + 2));
if(i <= 200) cudaMallocManaged(&neighbour[i], sizeof(int)*(no_of_colloid + 2));
if(i <= 7000) cudaMallocManaged(&nbr[i], sizeof(int)*(no_of_colloid + 2));
if(i <= 7000) cudaMallocManaged(&up_nbr[i], sizeof(int)*(no_of_colloid + 2));
if(i <= no_of_colloid) cudaMallocManaged(&neigh_fl[i], sizeof(int)*(10000 + 2));
if(i <= no_of_colloid) cudaMallocManaged(&vc[i], sizeof(point)*(10000 + 2));
if(i <= no_of_colloid) cudaMallocManaged(&u[i], sizeof(point)*(10000 + 2));
if(i <= no_of_colloid) cudaMallocManaged(&om[i], sizeof(point)*(10000 + 2));
if(i <= no_of_colloid) cudaMallocManaged(&dp[i], sizeof(int)*(512)); //512 = nbox
cudaMallocManaged(&box_part[i], sizeof(int)*(maxpart + 2));
cudaMallocManaged(&cell_part[i], sizeof(int)*(maxpart + 2));
cudaMallocManaged(&rot[i], sizeof(point)*4);
}
}
void initialize_colloid() {
int counter = 0, check, nofp = 0;
double space_limit = 1.3*sig_colloid, ang_vscale_colloid = sqrt(12.0*kbt1/I_colloid), vscale_colloid = sqrt(12.0*kbt1/mass_colloid);
point avr_vel = point(0, 0, 0), t, temp, iter = point(4, 4, 4), lim = len - point(1, 1, 1);
for(int i = 0; i <= lim.prod(); i += 5, iter.next(lim, point(5, 5, 5), point(4, 4, 4)), nofp++) {
if(nofp < no_of_colloid) pos_colloid[++nofp] = iter;
else break;
}
while(counter < no_of_colloid) {
t = t.random(iv, seed, idum, iy)*len;
check = 1;
for(int j = 1; j <= counter; j++) {
temp = img(t - pos_colloid[j], len);
check = (sqrt((temp*temp).sum()) < space_limit)? 0: check;
}
if(check)
pos_colloid[++counter] = t;
}
for(int j = 1; j <= no_of_colloid; j++) {
vel_colloid[j] = (vel_colloid[j].random(iv, seed, idum, iy) - point(0.5, 0.5, 0.5))*vscale_colloid;
avr_vel += vel_colloid[j];
}
avr_vel = avr_vel/no_of_colloid;
for(int j = 1; j <= no_of_colloid; j++) {
vel_colloid[j] = vel_colloid[j] - avr_vel;
ang_vel_colloid[j] = (t.random(iv, seed, idum, iy) - point(0.5, 0.5, 0.5))*ang_vscale_colloid;
}
}
void initialize_fluid() {
int counter = 0, check;
double vscale_fluid = sqrt(12.0*kbt/mass_fl);
point avr_vel = point(0, 0, 0), t, temp;
while(counter < no_of_fluid) {
t = t.random(iv, seed, idum, iy)*len;
check = 1;
for(int j = 1; j <= no_of_colloid; j++) {
temp = img(t - pos_colloid[j], len);
check = (sqrt((temp*temp).sum()) < sigma*0.5)? 0: check;
}
if(check)
pos_fl[++counter] = t;
}
for(int j = 1; j <= no_of_fluid; j++) {
vel_fl[j] = (vel_fl[j].random(iv, seed, idum, iy) - point(0.5, 0.5, 0.5))*vscale_fluid;
avr_vel += vel_fl[j];
}
avr_vel = avr_vel/no_of_fluid;
for(int j = 1; j <= no_of_fluid; j++)
vel_fl[j] = vel_fl[j] - avr_vel;
}
|
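A minimal host-side sketch (written for this note, not taken from the files above) of the velocity initialization idea used in initialize_colloid() and initialize_fluid(): draw uniform velocities in [-0.5, 0.5]*vscale, then subtract the ensemble mean so the net momentum is exactly zero. The Vec3 struct, the particle count, the vscale value, and the use of std::rand are illustrative assumptions; the original code uses its own point type and random generator.

#include <cstdio>
#include <cstdlib>
#include <vector>

struct Vec3 { double x, y, z; };

static double uniform01() { return std::rand() / (double)RAND_MAX; }

int main() {
    const int n = 1000;            // illustrative particle count
    const double vscale = 1.0;     // stands in for sqrt(12*kbt/mass)
    std::vector<Vec3> vel(n);
    Vec3 mean = {0, 0, 0};
    for (int i = 0; i < n; ++i) {
        vel[i] = { (uniform01() - 0.5) * vscale,
                   (uniform01() - 0.5) * vscale,
                   (uniform01() - 0.5) * vscale };
        mean.x += vel[i].x; mean.y += vel[i].y; mean.z += vel[i].z;
    }
    mean.x /= n; mean.y /= n; mean.z /= n;
    for (int i = 0; i < n; ++i) {  // remove centre-of-mass drift
        vel[i].x -= mean.x; vel[i].y -= mean.y; vel[i].z -= mean.z;
    }
    Vec3 net = {0, 0, 0};
    for (int i = 0; i < n; ++i) { net.x += vel[i].x; net.y += vel[i].y; net.z += vel[i].z; }
    std::printf("net momentum after correction: %g %g %g\n", net.x, net.y, net.z);
    return 0;
}

Subtracting the mean once is exact for linear momentum; the same pattern is applied separately to the colloid and fluid velocities in the file above.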
998771e69ff0bb7ec06b2c8d9019c74f89939bb8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#ifdef GLOBAL
__device__ char x[THREADS];
#endif
__global__ void racey_kernel() {
#ifdef SHARED
__shared__ char x[THREADS];
#endif
#ifdef WW
x[threadIdx.x] = threadIdx.x;
x[THREADS - threadIdx.x - 1] = threadIdx.x;
#elif RW
volatile char c = x[threadIdx.x];
x[THREADS - threadIdx.x - 1] = threadIdx.x;
#endif
}
int main() {
hipLaunchKernelGGL(( racey_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, );
hipDeviceReset();
return 0;
}
|
998771e69ff0bb7ec06b2c8d9019c74f89939bb8.cu
|
#include <stdio.h>
#ifdef GLOBAL
__device__ char x[THREADS];
#endif
__global__ void racey_kernel() {
#ifdef SHARED
__shared__ char x[THREADS];
#endif
#ifdef WW
x[threadIdx.x] = threadIdx.x;
x[THREADS - threadIdx.x - 1] = threadIdx.x;
#elif RW
volatile char c = x[threadIdx.x];
x[THREADS - threadIdx.x - 1] = threadIdx.x;
#endif
}
int main() {
racey_kernel<<<BLOCKS,THREADS>>>();
cudaDeviceReset();
return 0;
}
|
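The file above is a small test case for data-race detectors: the GLOBAL/SHARED macros select where the array lives, and WW/RW select a write-write or read-write conflict. The conflict arises because thread t touches both x[t] and x[THREADS - t - 1], so the thread pair (t, THREADS - 1 - t) accesses the same two elements. As a hedged contrast (not part of the test case), the self-contained sketch below keeps exactly one writer per element and therefore has no race; the array size N and the single-block launch are arbitrary choices.

#include <cstdio>
#include <cuda_runtime.h>

#define N 64

__device__ char x[N];

__global__ void no_race_kernel() {
    int t = threadIdx.x;
    x[t] = (char)t;                // exactly one writer per element: no data race
}

int main() {
    no_race_kernel<<<1, N>>>();
    cudaDeviceSynchronize();
    char host[N];
    cudaMemcpyFromSymbol(host, x, N);     // copy the device array back for inspection
    std::printf("x[0]=%d x[N-1]=%d\n", (int)host[0], (int)host[N - 1]);
    return 0;
}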
0cb64432ce2a16b421fcd65db4c0b71450d4048d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/adaptive_avg_pool2d_impl.cuh"
#include "include/hip/hip_fp16.h"
__device__ inline uint start_index(uint a, uint b, uint c) {
return floorf(__uint2float_rn(a * c) / __uint2float_rn(b));
}
__device__ inline uint end_index(uint a, uint b, uint c) {
return ceilf(__uint2float_rn((a + 1) * c) / __uint2float_rn(b));
}
template <typename T>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, T *input_data,
T *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
T *input_ptr = input_data + c * input_height * input_width;
T *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
T sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / kh / kw;
}
}
}
}
template <>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, float *input_data,
float *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
float *input_ptr = input_data + c * input_height * input_width;
float *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
float sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / __uint2float_rn(kh * kw);
}
}
}
}
template <>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, half *input_data,
half *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
half *input_ptr = input_data + c * input_height * input_width;
half *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
half sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / __uint2half_rn(kh * kw);
}
}
}
}
template <>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, double *input_data,
double *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
double *input_ptr = input_data + c * input_height * input_width;
double *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
double sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / __uint2double_rn(kh * kw);
}
}
}
}
template <typename T>
hipError_t ApplyAdaptiveAvgPool2D(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, T *input_data, T *output_data,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( AdaptiveAvgPool2DKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size, input_height, input_width, output_height, output_width, input_data, output_data);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT hipError_t ApplyAdaptiveAvgPool2D<float>(const uint size, const uint input_height,
const uint input_width, const uint output_height,
const uint output_width, float *input_data,
float *output_data, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdaptiveAvgPool2D<half>(const uint size, const uint input_height,
const uint input_width, const uint output_height,
const uint output_width, half *input_data,
half *output_data, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdaptiveAvgPool2D<double>(const uint size, const uint input_height,
const uint input_width, const uint output_height,
const uint output_width, double *input_data,
double *output_data, hipStream_t cuda_stream);
|
0cb64432ce2a16b421fcd65db4c0b71450d4048d.cu
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/adaptive_avg_pool2d_impl.cuh"
#include "include/cuda_fp16.h"
__device__ inline uint start_index(uint a, uint b, uint c) {
return floorf(__uint2float_rn(a * c) / __uint2float_rn(b));
}
__device__ inline uint end_index(uint a, uint b, uint c) {
return ceilf(__uint2float_rn((a + 1) * c) / __uint2float_rn(b));
}
template <typename T>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, T *input_data,
T *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
T *input_ptr = input_data + c * input_height * input_width;
T *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
T sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / kh / kw;
}
}
}
}
template <>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, float *input_data,
float *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
float *input_ptr = input_data + c * input_height * input_width;
float *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
float sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / __uint2float_rn(kh * kw);
}
}
}
}
template <>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, half *input_data,
half *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
half *input_ptr = input_data + c * input_height * input_width;
half *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
half sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / __uint2half_rn(kh * kw);
}
}
}
}
template <>
__global__ void AdaptiveAvgPool2DKernel(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, double *input_data,
double *output_data) {
for (uint c = blockIdx.x * blockDim.x + threadIdx.x; c < size; c += gridDim.x * blockDim.x) {
double *input_ptr = input_data + c * input_height * input_width;
double *output_ptr = output_data + c * output_height * output_width;
for (uint oh = 0; oh < output_height; oh++) {
uint ih0 = start_index(oh, output_height, input_height);
uint ih1 = end_index(oh, output_height, input_height);
uint kh = ih1 - ih0;
for (uint ow = 0; ow < output_width; ow++) {
uint iw0 = start_index(ow, output_width, input_width);
uint iw1 = end_index(ow, output_width, input_width);
uint kw = iw1 - iw0;
// compute local average
double sum = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
sum += input_ptr[ih * input_width + iw];
}
}
output_ptr[oh * output_width + ow] = sum / __uint2double_rn(kh * kw);
}
}
}
}
template <typename T>
cudaError_t ApplyAdaptiveAvgPool2D(const uint size, const uint input_height, const uint input_width,
const uint output_height, const uint output_width, T *input_data, T *output_data,
cudaStream_t cuda_stream) {
AdaptiveAvgPool2DKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size, input_height, input_width, output_height, output_width, input_data, output_data);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT cudaError_t ApplyAdaptiveAvgPool2D<float>(const uint size, const uint input_height,
const uint input_width, const uint output_height,
const uint output_width, float *input_data,
float *output_data, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdaptiveAvgPool2D<half>(const uint size, const uint input_height,
const uint input_width, const uint output_height,
const uint output_width, half *input_data,
half *output_data, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdaptiveAvgPool2D<double>(const uint size, const uint input_height,
const uint input_width, const uint output_height,
const uint output_width, double *input_data,
double *output_data, cudaStream_t cuda_stream);
|
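A hedged host-side reference (written for this note, not taken from the kernel sources above) of the same adaptive average pooling index rule: output cell (oh, ow) averages the input rows [floor(oh*H/OH), ceil((oh+1)*H/OH)) and the analogous column range, which is what start_index/end_index compute on the device. The 7x7 -> 3x3 sizes and the ramp input are arbitrary example values.

#include <cstdio>
#include <cmath>
#include <vector>

static unsigned start_index(unsigned a, unsigned b, unsigned c) {
    return (unsigned)std::floor((double)(a * c) / (double)b);
}
static unsigned end_index(unsigned a, unsigned b, unsigned c) {
    return (unsigned)std::ceil((double)((a + 1) * c) / (double)b);
}

int main() {
    const unsigned H = 7, W = 7, OH = 3, OW = 3;             // arbitrary example sizes
    std::vector<float> in(H * W), out(OH * OW, 0.f);
    for (unsigned i = 0; i < H * W; ++i) in[i] = (float)i;   // simple ramp input
    for (unsigned oh = 0; oh < OH; ++oh) {
        const unsigned ih0 = start_index(oh, OH, H), ih1 = end_index(oh, OH, H);
        for (unsigned ow = 0; ow < OW; ++ow) {
            const unsigned iw0 = start_index(ow, OW, W), iw1 = end_index(ow, OW, W);
            float sum = 0.f;
            for (unsigned ih = ih0; ih < ih1; ++ih)
                for (unsigned iw = iw0; iw < iw1; ++iw)
                    sum += in[ih * W + iw];
            out[oh * OW + ow] = sum / (float)((ih1 - ih0) * (iw1 - iw0));   // local average
        }
    }
    for (unsigned oh = 0; oh < OH; ++oh) {
        for (unsigned ow = 0; ow < OW; ++ow) std::printf("%8.3f", out[oh * OW + ow]);
        std::printf("\n");
    }
    return 0;
}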
5777afaa9b465c57da531f1204cce476b1471988.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cuda_surface_types.h>
#include "device_launch_parameters.h" //device_launch_parameters.h"
//#include <comutil.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "hip/device_functions.h"
//#include <windows.h>
#include <fstream>
#include <cfloat>
#include "rocblas.h"
#include "cudaCompute.h"
#pragma comment(lib, "cudart")
#pragma comment(lib,"cublas.lib")
using namespace std;
//using namespace _com_util;
__global__ void cuda_SparseIndexForward(int * rowIdx, int * sparseColIndex, float * weight, int rowSize, int inputDim, int outputDim, float * output, float alpha, float beta)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
if (idx < rowSize && idy < outputDim)
{
int fea_end = rowIdx[idx];
int fea_begin = idx > 0 ? rowIdx[idx - 1] : 0;
float sum = 0;
for (int i = fea_begin; i < fea_end; ++i)
{
int fea_idx = sparseColIndex[i];
if(fea_idx < inputDim)
sum += weight[fea_idx * outputDim + idy];
}
output[idx * outputDim + idy] = alpha * output[idx * outputDim + idy] + beta * sum;
}
}
void Cuda_SparseIndexForward(int * rowIdx, int * sparseColIndex, float * weight, int rowSize, int inputDim, int outputDim, float * output, float alpha, float beta)
{
dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
dim3 block_tail((rowSize - 1) / DEFAULT_THREAD_PER_DIM + 1, (outputDim - 1) / DEFAULT_THREAD_PER_DIM + 1);
cuda_SparseIndexForward << <block_tail, thread_tail >> >(rowIdx, sparseColIndex, weight, rowSize, inputDim, outputDim, output, alpha, beta);
}
__global__ void cuda_SparseIndexBackward(int * rowIdx, int * sparseColIndex, float * doutput, int rowSize, int inputDim, int outputDim, float * weight, float beta)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < outputDim)
{
for (int b = 0; b < rowSize; b++)
{
int col_end = rowIdx[b];
int col_begin = b == 0 ? 0 : rowIdx[b - 1];
float dv = beta * doutput[b * outputDim + idx];
for (int i = col_begin; i < col_end; i++)
{
int fea_idx = sparseColIndex[i];
weight[fea_idx * outputDim + idx] += dv;
}
}
}
}
void Cuda_SparseIndexBackward(int * rowIdx, int * sparseColIndex, float * doutput, int rowSize, int inputDim, int outputDim, float * weight, float beta)
{
dim3 thread_tail(DEFAULT_THREAD_PER_BLOCK);
dim3 block_tail((outputDim - 1) / DEFAULT_THREAD_PER_BLOCK + 1);
cuda_SparseIndexBackward << <block_tail, thread_tail >> >(rowIdx, sparseColIndex, doutput, rowSize, inputDim, outputDim, weight, beta);
}
__global__ void cuda_Tanh(float * a, float * b, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < size) b[idx] = tanhf(a[idx]);
}
void Cuda_Tanh(float * a, float * b, int size)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_Tanh << <nBlockPerGrid, nThreadPerBlock >> >(a, b, size);
}
__global__ void cuda_DerivTanh(float * doutput, float * output, float * dinput, float alpha, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < size)
{
dinput[idx] = dinput[idx] * alpha + doutput[idx] * (1 - output[idx]) * (1 + output[idx]);
}
}
// dinput[idx] = dinput[idx] * alpha + doutput[idx] * (1 - output[idx]) * (1 + output[idx]);
void Cuda_DerivTanh(float * doutput, float * output, float * dinput, float alpha, int size)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_DerivTanh <<<nBlockPerGrid, nThreadPerBlock >> >(doutput, output, dinput, alpha, size);
}
__global__ void cuda_VecMulVec(float * pLeft, float * pRight, float * pDst, int dim, int size, float alpha, float beta)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < size)
{
float sum = 0;
int offset = x * dim;
for (int i = 0; i < dim; i++)
{
sum += pLeft[offset + i] * pRight[offset + i];
}
pDst[x] = alpha * pDst[x] + beta * sum;
}
}
// pDst = pDst * weiDst + pLeft @ pRight;
void Cuda_VecMulVec(float * pLeft, float * pRight, float * pDst, int dim, int size, float alpha, float beta)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_VecMulVec << <nBlockPerGrid, nThreadPerBlock >> >(pLeft, pRight, pDst, dim, size, alpha, beta);
}
__global__ void cuda_IVecMulVec(float * pLeft, int * leftIdxA, float * pRight, int * rightIdxB, float * pDst, int dim, int size, float alpha, float beta)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < size)
{
int leftIdx = leftIdxA[x] * dim;
int rightIdx = rightIdxB[x] * dim;
float sum = 0;
for (int i = 0; i < dim; i++) sum += pLeft[leftIdx + i] * pRight[rightIdx + i];
pDst[x] = alpha * pDst[x] + beta * sum;
}
}
// pDst = pDst * weiDst + pLeft @ pRight;
void Cuda_IVecMulVec(float * pLeft, int * leftIdxA, float * pRight, int * rightIdxB, float * pDst, int dim, int size, float alpha, float beta)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_IVecMulVec << <nBlockPerGrid, nThreadPerBlock >> >(pLeft, leftIdxA, pRight, rightIdxB, pDst, dim, size, alpha, beta);
}
__global__ void cuda_CosineSimilarity(float * inputA, float * ASquare, int ASize, float * inputB, float * BSquare, int BSize,
int dim, float * outputC, int * matchIdxA, int * matchIdxB, int matchSize, float eps)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < matchSize)
{
int aid = matchIdxA[idx];
int bid = matchIdxB[idx];
float sumxx = sqrtf(ASquare[aid]);
float sumyy = sqrtf(BSquare[bid]);
float sumxy = 0;
float * ptrA = inputA + aid * dim;
float * ptrB = inputB + bid * dim;
for (int i = 0; i < dim; i++) sumxy += ptrA[i] * ptrB[i];
outputC[idx] = (float)(sumxy * 1.0f / ((float)(sumxx * sumyy) + eps));
}
}
void Cuda_CosineSimilarity(float * inputA, float * ASquare, int ASize, float * inputB, float * BSquare, int BSize,
int dim, float * outputC, int * matchIdxA, int * matchIdxB, int matchSize, float eps)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (matchSize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_CosineSimilarity << <nBlockPerGrid, nThreadPerBlock >> >(inputA, ASquare, ASize, inputB, BSquare, BSize, dim, outputC, matchIdxA, matchIdxB, matchSize, eps);
}
__global__ void cuda_Deriv_CosineSimilarity_partialMatching(float * src, float * tgt, float *srcSquare, float * tgtSquare, int dim,
int * src2MatchIdx, int * src2MatchElement, int * tgtIdx, int srcSize, int matchSize, float * simi, float * derivSimi,
float * dcSrc, float alpha, float eps)
{
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
//idx -> source/q Index.
if (idx < srcSize && idy < dim)
{
int matchBeginIdx = idx == 0 ? 0 : src2MatchIdx[idx - 1];
int matchEndIdx = src2MatchIdx[idx];
float sum = 0;
float qRoot = sqrtf(srcSquare[idx]);
float qv = src[idx * dim + idy];
float qSquare_qv = qv / (srcSquare[idx] + eps);
for (int match = matchBeginIdx; match < matchEndIdx; match++)
{
int mIdx = src2MatchElement[match];
int dIdx = tgtIdx[mIdx];
float dRoot = sqrtf(tgtSquare[dIdx]);
sum += derivSimi[mIdx] * (tgt[dIdx * dim + idy] / (qRoot * dRoot + eps) - qSquare_qv * simi[mIdx]); /// qSquare);
}
dcSrc[idx * dim + idy] = alpha * dcSrc[idx * dim + idy] + sum;
}
}
void Cuda_DerivCosineSimilarity(float * q, float * d, float *qSquare, float * dSquare, int dim,
int * src2MatchIdx, int * src2MatchElement, int * tgt2MatchIdx, int * tgt2MatchElement, int * srcIdx, int * tgtIdx, int srcSize, int tgtSize, int matchSize,
float * simi, float * derivSimi, float * dcq, float * dcd, float alpha, float eps)
{
dim3 srcThread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
dim3 srcBlock_tail((srcSize - 1) / DEFAULT_THREAD_PER_DIM + 1, (dim - 1) / DEFAULT_THREAD_PER_DIM + 1);
cuda_Deriv_CosineSimilarity_partialMatching << <srcBlock_tail, srcThread_tail >> >(q, d, qSquare, dSquare, dim,
src2MatchIdx, src2MatchElement, tgtIdx, srcSize, matchSize, simi, derivSimi, dcq, alpha, eps);
dim3 tgtThread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
dim3 tgtBlock_tail((tgtSize - 1) / DEFAULT_THREAD_PER_DIM + 1, (dim - 1) / DEFAULT_THREAD_PER_DIM + 1);
cuda_Deriv_CosineSimilarity_partialMatching << <tgtBlock_tail, tgtThread_tail >> >(d, q, dSquare, qSquare, dim,
tgt2MatchIdx, tgt2MatchElement, srcIdx, tgtSize, matchSize, simi, derivSimi, dcd, alpha, eps);
}
|
5777afaa9b465c57da531f1204cce476b1471988.cu
|
#include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <cublas.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_surface_types.h>
#include "device_launch_parameters.h" //device_launch_parameters.h"
//#include <comutil.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include "device_functions.h"
//#include <windows.h>
#include <fstream>
#include <cfloat>
#include "cublas_v2.h"
#include "cudaCompute.h"
#pragma comment(lib, "cudart")
#pragma comment(lib,"cublas.lib")
using namespace std;
//using namespace _com_util;
__global__ void cuda_SparseIndexForward(int * rowIdx, int * sparseColIndex, float * weight, int rowSize, int inputDim, int outputDim, float * output, float alpha, float beta)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
if (idx < rowSize && idy < outputDim)
{
int fea_end = rowIdx[idx];
int fea_begin = idx > 0 ? rowIdx[idx - 1] : 0;
float sum = 0;
for (int i = fea_begin; i < fea_end; ++i)
{
int fea_idx = sparseColIndex[i];
if(fea_idx < inputDim)
sum += weight[fea_idx * outputDim + idy];
}
output[idx * outputDim + idy] = alpha * output[idx * outputDim + idy] + beta * sum;
}
}
void Cuda_SparseIndexForward(int * rowIdx, int * sparseColIndex, float * weight, int rowSize, int inputDim, int outputDim, float * output, float alpha, float beta)
{
dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
dim3 block_tail((rowSize - 1) / DEFAULT_THREAD_PER_DIM + 1, (outputDim - 1) / DEFAULT_THREAD_PER_DIM + 1);
cuda_SparseIndexForward << <block_tail, thread_tail >> >(rowIdx, sparseColIndex, weight, rowSize, inputDim, outputDim, output, alpha, beta);
}
__global__ void cuda_SparseIndexBackward(int * rowIdx, int * sparseColIndex, float * doutput, int rowSize, int inputDim, int outputDim, float * weight, float beta)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < outputDim)
{
for (int b = 0; b < rowSize; b++)
{
int col_end = rowIdx[b];
int col_begin = b == 0 ? 0 : rowIdx[b - 1];
float dv = beta * doutput[b * outputDim + idx];
for (int i = col_begin; i < col_end; i++)
{
int fea_idx = sparseColIndex[i];
weight[fea_idx * outputDim + idx] += dv;
}
}
}
}
void Cuda_SparseIndexBackward(int * rowIdx, int * sparseColIndex, float * doutput, int rowSize, int inputDim, int outputDim, float * weight, float beta)
{
dim3 thread_tail(DEFAULT_THREAD_PER_BLOCK);
dim3 block_tail((outputDim - 1) / DEFAULT_THREAD_PER_BLOCK + 1);
cuda_SparseIndexBackward << <block_tail, thread_tail >> >(rowIdx, sparseColIndex, doutput, rowSize, inputDim, outputDim, weight, beta);
}
__global__ void cuda_Tanh(float * a, float * b, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < size) b[idx] = tanhf(a[idx]);
}
void Cuda_Tanh(float * a, float * b, int size)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_Tanh << <nBlockPerGrid, nThreadPerBlock >> >(a, b, size);
}
__global__ void cuda_DerivTanh(float * doutput, float * output, float * dinput, float alpha, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < size)
{
dinput[idx] = dinput[idx] * alpha + doutput[idx] * (1 - output[idx]) * (1 + output[idx]);
}
}
// dinput[idx] = dinput[idx] * alpha + doutput[idx] * (1 - output[idx]) * (1 + output[idx]);
void Cuda_DerivTanh(float * doutput, float * output, float * dinput, float alpha, int size)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_DerivTanh <<<nBlockPerGrid, nThreadPerBlock >> >(doutput, output, dinput, alpha, size);
}
__global__ void cuda_VecMulVec(float * pLeft, float * pRight, float * pDst, int dim, int size, float alpha, float beta)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < size)
{
float sum = 0;
int offset = x * dim;
for (int i = 0; i < dim; i++)
{
sum += pLeft[offset + i] * pRight[offset + i];
}
pDst[x] = alpha * pDst[x] + beta * sum;
}
}
// pDst = pDst * weiDst + pLeft @ pRight;
void Cuda_VecMulVec(float * pLeft, float * pRight, float * pDst, int dim, int size, float alpha, float beta)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_VecMulVec << <nBlockPerGrid, nThreadPerBlock >> >(pLeft, pRight, pDst, dim, size, alpha, beta);
}
__global__ void cuda_IVecMulVec(float * pLeft, int * leftIdxA, float * pRight, int * rightIdxB, float * pDst, int dim, int size, float alpha, float beta)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < size)
{
int leftIdx = leftIdxA[x] * dim;
int rightIdx = rightIdxB[x] * dim;
float sum = 0;
for (int i = 0; i < dim; i++) sum += pLeft[leftIdx + i] * pRight[rightIdx + i];
pDst[x] = alpha * pDst[x] + beta * sum;
}
}
// pDst = pDst * weiDst + pLeft @ pRight;
void Cuda_IVecMulVec(float * pLeft, int * leftIdxA, float * pRight, int * rightIdxB, float * pDst, int dim, int size, float alpha, float beta)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_IVecMulVec << <nBlockPerGrid, nThreadPerBlock >> >(pLeft, leftIdxA, pRight, rightIdxB, pDst, dim, size, alpha, beta);
}
__global__ void cuda_CosineSimilarity(float * inputA, float * ASquare, int ASize, float * inputB, float * BSquare, int BSize,
int dim, float * outputC, int * matchIdxA, int * matchIdxB, int matchSize, float eps)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < matchSize)
{
int aid = matchIdxA[idx];
int bid = matchIdxB[idx];
float sumxx = sqrtf(ASquare[aid]);
float sumyy = sqrtf(BSquare[bid]);
float sumxy = 0;
float * ptrA = inputA + aid * dim;
float * ptrB = inputB + bid * dim;
for (int i = 0; i < dim; i++) sumxy += ptrA[i] * ptrB[i];
outputC[idx] = (float)(sumxy * 1.0f / ((float)(sumxx * sumyy) + eps));
}
}
void Cuda_CosineSimilarity(float * inputA, float * ASquare, int ASize, float * inputB, float * BSquare, int BSize,
int dim, float * outputC, int * matchIdxA, int * matchIdxB, int matchSize, float eps)
{
int nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int nBlockPerGrid = (matchSize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
cuda_CosineSimilarity << <nBlockPerGrid, nThreadPerBlock >> >(inputA, ASquare, ASize, inputB, BSquare, BSize, dim, outputC, matchIdxA, matchIdxB, matchSize, eps);
}
__global__ void cuda_Deriv_CosineSimilarity_partialMatching(float * src, float * tgt, float *srcSquare, float * tgtSquare, int dim,
int * src2MatchIdx, int * src2MatchElement, int * tgtIdx, int srcSize, int matchSize, float * simi, float * derivSimi,
float * dcSrc, float alpha, float eps)
{
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
//idx -> source/q Index.
if (idx < srcSize && idy < dim)
{
int matchBeginIdx = idx == 0 ? 0 : src2MatchIdx[idx - 1];
int matchEndIdx = src2MatchIdx[idx];
float sum = 0;
float qRoot = sqrtf(srcSquare[idx]);
float qv = src[idx * dim + idy];
float qSquare_qv = qv / (srcSquare[idx] + eps);
for (int match = matchBeginIdx; match < matchEndIdx; match++)
{
int mIdx = src2MatchElement[match];
int dIdx = tgtIdx[mIdx];
float dRoot = sqrtf(tgtSquare[dIdx]);
sum += derivSimi[mIdx] * (tgt[dIdx * dim + idy] / (qRoot * dRoot + eps) - qSquare_qv * simi[mIdx]); /// qSquare);
}
dcSrc[idx * dim + idy] = alpha * dcSrc[idx * dim + idy] + sum;
}
}
void Cuda_DerivCosineSimilarity(float * q, float * d, float *qSquare, float * dSquare, int dim,
int * src2MatchIdx, int * src2MatchElement, int * tgt2MatchIdx, int * tgt2MatchElement, int * srcIdx, int * tgtIdx, int srcSize, int tgtSize, int matchSize,
float * simi, float * derivSimi, float * dcq, float * dcd, float alpha, float eps)
{
dim3 srcThread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
dim3 srcBlock_tail((srcSize - 1) / DEFAULT_THREAD_PER_DIM + 1, (dim - 1) / DEFAULT_THREAD_PER_DIM + 1);
cuda_Deriv_CosineSimilarity_partialMatching << <srcBlock_tail, srcThread_tail >> >(q, d, qSquare, dSquare, dim,
src2MatchIdx, src2MatchElement, tgtIdx, srcSize, matchSize, simi, derivSimi, dcq, alpha, eps);
dim3 tgtThread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
dim3 tgtBlock_tail((tgtSize - 1) / DEFAULT_THREAD_PER_DIM + 1, (dim - 1) / DEFAULT_THREAD_PER_DIM + 1);
cuda_Deriv_CosineSimilarity_partialMatching << <tgtBlock_tail, tgtThread_tail >> >(d, q, dSquare, qSquare, dim,
tgt2MatchIdx, tgt2MatchElement, srcIdx, tgtSize, matchSize, simi, derivSimi, dcd, alpha, eps);
}
|
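For reference, a hedged CPU version (illustrative only, not part of the file above) of the quantity cuda_CosineSimilarity computes per match pair: dot(A_a, B_b) / (sqrt(ASquare[a]) * sqrt(BSquare[b]) + eps), with the squared norms assumed to be precomputed exactly as in the GPU path. The function name, the 4-dimensional vectors, and the eps value are made up for the example.

#include <cstdio>
#include <cmath>

static float cosine(const float *a, const float *b, float aSquare, float bSquare, int dim, float eps) {
    float dot = 0.f;
    for (int i = 0; i < dim; ++i) dot += a[i] * b[i];
    return dot / (std::sqrt(aSquare) * std::sqrt(bSquare) + eps);   // same form as the kernel
}

int main() {
    const int dim = 4;                       // illustrative dimension
    float a[dim] = {1.f, 0.f, 2.f, 0.f};
    float b[dim] = {1.f, 1.f, 2.f, 0.f};
    float aSq = 0.f, bSq = 0.f;              // stand-ins for the precomputed ASquare/BSquare
    for (int i = 0; i < dim; ++i) { aSq += a[i] * a[i]; bSq += b[i] * b[i]; }
    std::printf("cos = %f\n", cosine(a, b, aSq, bSq, dim, 1e-8f));
    return 0;
}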
3d9123f47948060a8679e2d76afaf3a8a5a873af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zparilu_kernels.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_c
__global__ void
magma_cparilu_csr_kernel(
const magma_int_t num_rows,
const magma_int_t nnz,
const magma_index_t *rowidxA,
const magma_index_t *colidxA,
const magmaFloatComplex * __restrict__ A,
const magma_index_t *rowptrL,
const magma_index_t *colidxL,
magmaFloatComplex *valL,
const magma_index_t *rowptrU,
const magma_index_t *colidxU,
magmaFloatComplex *valU)
{
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex s, sp;
int il, iu, jl, ju;
if (k < nnz) {
i = rowidxA[k];
j = colidxA[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg(A+k);
#else
s = A[k];
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1]) {
sp = zero;
jl = colidxL[il];
ju = colidxU[iu];
sp = (jl == ju) ? valL[il] * valU[iu] : sp;
s = (jl == ju) ? s-sp : s;
il = (jl <= ju) ? il+1 : il;
iu = (jl >= ju) ? iu+1 : iu;
}
s += sp; // undo the last operation (it must be the last)
if (i > j) // modify l entry
valL[il-1] = s / valU[rowptrU[j+1]-1];
else // modify u entry
valU[iu-1] = s;
}
}
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the system matrix is COO, the lower triangular factor L
is stored in CSR, the upper triangular factor U is transposed, then also
stored in CSR (equivalent to CSC format for the non-transposed U).
Every component of L and U is handled by one thread.
Arguments
---------
@param[in]
A magma_c_matrix
input matrix A determining initial guess & processing order
@param[in,out]
L magma_c_matrix
input/output matrix L containing the lower triangular factor
@param[in,out]
U magma_c_matrix
input/output matrix U containing the upper triangular factor
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cparilu_csr(
magma_c_matrix A,
magma_c_matrix L,
magma_c_matrix U,
magma_queue_t queue)
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv(A.nnz, blocksize1);
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid(dimgrid1, dimgrid2, dimgrid3);
dim3 block(blocksize1, blocksize2, 1);
hipLaunchKernelGGL(( magma_cparilu_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
A.num_rows, A.nnz, A.rowidx, A.col, A.val,
L.row, L.col, L.val,
U.row, U.col, U.val);
return MAGMA_SUCCESS;
}
|
3d9123f47948060a8679e2d76afaf3a8a5a873af.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zparilu_kernels.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_c
__global__ void
magma_cparilu_csr_kernel(
const magma_int_t num_rows,
const magma_int_t nnz,
const magma_index_t *rowidxA,
const magma_index_t *colidxA,
const magmaFloatComplex * __restrict__ A,
const magma_index_t *rowptrL,
const magma_index_t *colidxL,
magmaFloatComplex *valL,
const magma_index_t *rowptrU,
const magma_index_t *colidxU,
magmaFloatComplex *valU)
{
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex s, sp;
int il, iu, jl, ju;
if (k < nnz) {
i = rowidxA[k];
j = colidxA[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg(A+k);
#else
s = A[k];
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1]) {
sp = zero;
jl = colidxL[il];
ju = colidxU[iu];
sp = (jl == ju) ? valL[il] * valU[iu] : sp;
s = (jl == ju) ? s-sp : s;
il = (jl <= ju) ? il+1 : il;
iu = (jl >= ju) ? iu+1 : iu;
}
s += sp; // undo the last operation (it must be the last)
if (i > j) // modify l entry
valL[il-1] = s / valU[rowptrU[j+1]-1];
else // modify u entry
valU[iu-1] = s;
}
}
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the system matrix is COO, the lower triangular factor L
is stored in CSR, the upper triangular factor U is transposed, then also
stored in CSR (equivalent to CSC format for the non-transposed U).
Every component of L and U is handled by one thread.
Arguments
---------
@param[in]
A magma_c_matrix
input matrix A determining initial guess & processing order
@param[in,out]
L magma_c_matrix
input/output matrix L containing the lower triangular factor
@param[in,out]
U magma_c_matrix
input/output matrix U containing the upper triangular factor
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cparilu_csr(
magma_c_matrix A,
magma_c_matrix L,
magma_c_matrix U,
magma_queue_t queue)
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv(A.nnz, blocksize1);
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid(dimgrid1, dimgrid2, dimgrid3);
dim3 block(blocksize1, blocksize2, 1);
magma_cparilu_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
(A.num_rows, A.nnz, A.rowidx, A.col, A.val,
L.row, L.col, L.val,
U.row, U.col, U.val);
return MAGMA_SUCCESS;
}
|
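A hedged illustration of the fixed-point iteration described in the comment block of magma_cparilu_csr: each pattern entry (i, j) is updated as s = a_ij - sum_{k < min(i,j)} l_ik * u_kj, then l_ij = s / u_jj for i > j and u_ij = s otherwise, with L kept unit-diagonal. The sketch below is a toy dense rewrite for readability, not MAGMA code; the 3x3 matrix, the L = I / U = A initial guess, and the sweep count are arbitrary, and MAGMA itself works on the CSR sparsity pattern only.

#include <cstdio>

#define N 3

// One Jacobi-style sweep over all pattern entries (here: a full dense pattern).
static void parilu_sweep(const double A[N][N], double L[N][N], double U[N][N]) {
    double Lnew[N][N], Unew[N][N];
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) { Lnew[i][j] = L[i][j]; Unew[i][j] = U[i][j]; }
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            const int kmax = i < j ? i : j;                  // k < min(i, j)
            double s = A[i][j];
            for (int k = 0; k < kmax; ++k) s -= L[i][k] * U[k][j];
            if (i > j) Lnew[i][j] = s / U[j][j];             // strictly lower part of L
            else       Unew[i][j] = s;                       // diagonal and upper part of U
        }
    }
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) { L[i][j] = Lnew[i][j]; U[i][j] = Unew[i][j]; }
}

int main() {
    const double A[N][N] = {{4, 1, 0}, {1, 4, 1}, {0, 1, 4}};
    double L[N][N] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};      // initial guess: L = I
    double U[N][N];
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) U[i][j] = A[i][j];       // initial guess: U = A
    for (int sweep = 0; sweep < 5; ++sweep) parilu_sweep(A, L, U);
    std::printf("L(2,1)=%f  U(2,2)=%f\n", L[1][0], U[1][1]); // approaches 0.25 and 3.75
    return 0;
}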
1eaaea4bd9e5d932c77e44de6c18fedef6644171.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "j3d27pt-32x32-4-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 54
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
(1.500f*A[t%2][i-1][j][k] +
0.500f*A[t%2][i-1][j-1][k-1] +
0.700f*A[t%2][i-1][j-1][k] +
0.900f*A[t%2][i-1][j-1][k+1] +
1.200f*A[t%2][i-1][j][k-1] +
1.201f*A[t%2][i-1][j][k+1] +
0.901f*A[t%2][i-1][j+1][k-1] +
0.701f*A[t%2][i-1][j+1][k] +
0.501f*A[t%2][i-1][j+1][k+1] +
1.510f*A[t%2][i][j][k] +
0.510f*A[t%2][i][j-1][k-1] +
0.710f*A[t%2][i][j-1][k] +
0.910f*A[t%2][i][j-1][k+1] +
1.210f*A[t%2][i][j][k-1] +
1.211f*A[t%2][i][j][k+1] +
0.911f*A[t%2][i][j+1][k-1] +
0.711f*A[t%2][i][j+1][k] +
0.511f*A[t%2][i][j+1][k+1] +
1.520f*A[t%2][i+1][j][k] +
0.520f*A[t%2][i+1][j-1][k-1] +
0.720f*A[t%2][i+1][j-1][k] +
0.920f*A[t%2][i+1][j-1][k+1] +
1.220f*A[t%2][i+1][j][k-1] +
1.221f*A[t%2][i+1][j][k+1] +
0.921f*A[t%2][i+1][j+1][k-1] +
0.721f*A[t%2][i+1][j+1][k] +
0.521f*A[t%2][i+1][j+1][k+1]) / 159;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
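// Note on the tiling parameters above (explanatory comment, not emitted by AN5D):
// each branch fuses __side0Len time steps, so the spatial tile is padded by
// __halo * __side0Len cells on every side (__OlLen2 / __OlLen3). Every variant
// therefore keeps the overlapped tile at 32x32 (for example __side0Len = 4 gives
// 24 + 2*4 = 32 and __side0Len = 2 gives 28 + 2*2 = 32), which is where
// __blockSize = 32 * 32 = 1024 threads per block comes from.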
|
1eaaea4bd9e5d932c77e44de6c18fedef6644171.cu
|
#include <assert.h>
#include <stdio.h>
#include "j3d27pt-32x32-4-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 54
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
(1.500f*A[t%2][i-1][j][k] +
0.500f*A[t%2][i-1][j-1][k-1] +
0.700f*A[t%2][i-1][j-1][k] +
0.900f*A[t%2][i-1][j-1][k+1] +
1.200f*A[t%2][i-1][j][k-1] +
1.201f*A[t%2][i-1][j][k+1] +
0.901f*A[t%2][i-1][j+1][k-1] +
0.701f*A[t%2][i-1][j+1][k] +
0.501f*A[t%2][i-1][j+1][k+1] +
1.510f*A[t%2][i][j][k] +
0.510f*A[t%2][i][j-1][k-1] +
0.710f*A[t%2][i][j-1][k] +
0.910f*A[t%2][i][j-1][k+1] +
1.210f*A[t%2][i][j][k-1] +
1.211f*A[t%2][i][j][k+1] +
0.911f*A[t%2][i][j+1][k-1] +
0.711f*A[t%2][i][j+1][k] +
0.511f*A[t%2][i][j+1][k+1] +
1.520f*A[t%2][i+1][j][k] +
0.520f*A[t%2][i+1][j-1][k-1] +
0.720f*A[t%2][i+1][j-1][k] +
0.920f*A[t%2][i+1][j-1][k+1] +
1.220f*A[t%2][i+1][j][k-1] +
1.221f*A[t%2][i+1][j][k+1] +
0.921f*A[t%2][i+1][j+1][k-1] +
0.721f*A[t%2][i+1][j+1][k] +
0.521f*A[t%2][i+1][j+1][k+1]) / 159;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
8b55bc6a45970aa7ccb89308ad337213d2f94cf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include <glm/gtc/matrix_inverse.hpp>
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include <algorithm>
#include <stdlib.h>
#include <random>
#include <vector>
#include <stack>
#include <string>
#include <fstream>
#include <iostream>
#include <iomanip>
#include <glm/gtx/intersect.hpp>
#include "KDnode.h"
#include "KDtree.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
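// Launch sketch for sendImageToPBO (kept as a comment; the real launch lives in
// this project's pathtrace() entry point, which is outside this file excerpt, so
// the names pbo, cam and iter below are assumptions):
//
//   const dim3 blockSize2d(8, 8);
//   const dim3 blocksPerGrid2d(
//       (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
//       (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
//   hipLaunchKernelGGL(sendImageToPBO, blocksPerGrid2d, blockSize2d, 0, 0,
//                      pbo, cam.resolution, iter, dev_image);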
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static PathSegment * dev_paths_cache = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static const int STACK_SIZE = 4000;
struct is_zero_bounce
{
__host__ __device__
bool operator()(const PathSegment p)
{
return (p.remainingBounces == 0);
}
};
struct by_material_id
{
const PathSegment a;
by_material_id(PathSegment _a) : a(_a) {}
__host__ __device__
int operator()(const PathSegment& x, const PathSegment& y) const
{
return x.color.r + y.color.r;
}
};
__host__ __device__ bool operator<(const PathSegment &lhs, const PathSegment &rhs)
{
return lhs.materialIdHit < rhs.materialIdHit;
}
__host__ __device__ bool operator<(const ShadeableIntersection &lhs, const ShadeableIntersection &rhs)
{
return lhs.materialId < rhs.materialId;
}
struct NodeStack{
KDN::NodeBare* node;
float tmin;
float tmax;
glm::vec3 origin;
};
// ------------------------------------------------------------------------
// --------------------------- KD TREE UTILITIES --------------------------
// ------------------------------------------------------------------------
std::vector<KDN::Triangle*> getTrianglesFromFile(const char* path)
{
std::vector<KDN::Triangle*>triangles;
string line;
ifstream file(path);
if (file.is_open())
{
while (getline(file, line))
{
float x1 = atof(line.c_str());
getline(file, line); float y1 = atof(line.c_str());
getline(file, line); float z1 = atof(line.c_str());
getline(file, line); float x2 = atof(line.c_str());
getline(file, line); float y2 = atof(line.c_str());
getline(file, line); float z2 = atof(line.c_str());
getline(file, line); float x3 = atof(line.c_str());
getline(file, line); float y3 = atof(line.c_str());
getline(file, line); float z3 = atof(line.c_str());
KDN::Triangle* t = new KDN::Triangle(x1, y1, z1,
x2, y2, z2,
x3, y3, z3);
triangles.push_back(t);
}
}
return triangles;
}
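// File format note for the loader above: the cache file holds nine ASCII floats
// per triangle, one value per line, in the order x1 y1 z1 x2 y2 z2 x3 y3 z3.
// For example (illustrative values), a single unit triangle is stored as the
// nine lines "0", "0", "0", "1", "0", "0", "0", "1", "0".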
// ------------------------------------------------------------------------
// --------------------------- END KD TREE UTILITIES ----------------------
// ------------------------------------------------------------------------
int obj_numshapes = 0;
int* obj_numpolyverts = NULL;
float* obj_verts = NULL;
float* obj_norms = NULL;
float* obj_texts = NULL;
int* obj_polyoffsets = NULL;
int* obj_polysidxflat = NULL;
float* obj_polysbboxes = NULL;
int* obj_materialOffsets = NULL;
// KD DATA
//KDN::KDnode* kd_nodes = NULL;
//KDN::Triangle* kd_triangles = NULL;
KDN::NodeBare* kd_nodesBare = NULL;
KDN::TriBare* kd_trianglesBare = NULL;
static int numNodes = 0;
static int numTriangles = 0;
void pathtraceInit(Scene *scene, bool enablekd) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_paths_cache, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// objloader part
if (scene->hasObj)
{
if (enablekd == false)
{
hipMalloc((void**)&obj_numpolyverts, scene->obj_numshapes * sizeof(int));
hipMalloc((void**)&obj_polyoffsets, scene->obj_numshapes * sizeof(int));
hipMalloc((void**)&obj_polysidxflat, scene->polyidxcount * sizeof(int));
hipMalloc((void**)&obj_verts, scene->objmesh->attrib.vertices.size()* sizeof(float));
hipMalloc((void**)&obj_norms, scene->objmesh->attrib.normals.size()* sizeof(float));
hipMalloc((void**)&obj_texts, scene->objmesh->attrib.texcoords.size()* sizeof(float));
hipMalloc((void**)&obj_polysbboxes, scene->obj_numshapes * 6 * sizeof(float));
}
hipMalloc((void**)&obj_materialOffsets, scene->obj_numshapes * sizeof(int));
// ------------------------------------------------------------------
// KD DATA PART
// ------------------------------------------------------------------
if (enablekd == true)
{
hipMalloc((void**)&kd_nodesBare, scene->numNodes * sizeof(KDN::NodeBare));
hipMalloc((void**)&kd_trianglesBare, scene->numTriangles * sizeof(KDN::TriBare));
hipMemcpy(kd_nodesBare, scene->newNodesBare, scene->numNodes * sizeof(KDN::NodeBare), hipMemcpyHostToDevice);
hipMemcpy(kd_trianglesBare, scene->newTrianglesBare, scene->numTriangles * sizeof(KDN::TriBare), hipMemcpyHostToDevice);
}
else
{
hipMemcpy(obj_numpolyverts, scene->obj_numpolyverts, scene->obj_numshapes * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(obj_polyoffsets, scene->obj_polyoffsets, scene->obj_numshapes * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(obj_polysidxflat, scene->obj_polysidxflat, scene->polyidxcount * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(obj_verts, scene->obj_verts, scene->objmesh->attrib.vertices.size()* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(obj_norms, scene->obj_norms, scene->objmesh->attrib.normals.size()* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(obj_texts, scene->obj_texts, scene->objmesh->attrib.texcoords.size()* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(obj_polysbboxes, scene->obj_bboxes, scene->obj_numshapes * 6 * sizeof(float), hipMemcpyHostToDevice);
}
hipMemcpy(obj_materialOffsets, scene->obj_materialOffsets, scene->obj_numshapes * sizeof(int), hipMemcpyHostToDevice);
}
/*
// compare the compressed sizes
printf("sizeof KDnode: %d\n", sizeof(KDN::KDnode));
printf("sizeof KDtriangle: %d\n", sizeof(KDN::Triangle));
printf("sizeof NodeBare: %d\n", sizeof(KDN::NodeBare));
printf("sizeof TriBare: %d\n", sizeof(KDN::TriBare));
// sizeof KDnode: 136
// sizeof KDtriangle: 116
// sizeof NodeBare: 64
// sizeof TriBare: 76
*/
checkCUDAError("pathtraceInit");
}
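// Call-order sketch (assumption: "scene" and "kdEnabled" come from the host
// application's setup code, which is not part of this file):
//
//   pathtraceInit(scene, kdEnabled); // upload image buffer, geometry, materials, OBJ/KD data
//   // ... run path-tracing iterations ...
//   pathtraceFree(scene, kdEnabled); // release exactly the buffers allocated above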
void pathtraceFree(Scene *scene, bool enablekd) {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_paths_cache);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// objloader part
if (scene->hasObj)
{
if (enablekd == false)
{
hipFree(obj_numpolyverts);
hipFree(obj_polyoffsets);
hipFree(obj_polysidxflat);
hipFree(obj_verts);
hipFree(obj_norms);
hipFree(obj_texts);
}
hipFree(obj_materialOffsets);
if (enablekd == true)
{
hipFree(kd_nodesBare);
hipFree(kd_trianglesBare);
}
}
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, float focalLength, float dofAngle, bool antialias)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.ray.isinside = false;
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
// antialiasing by jittering the ray
thrust::default_random_engine rng(utilhash(iter));
thrust::uniform_real_distribution<float> unitDistrib(0, 1);
if (antialias)
{
float jitterscale = 0.002;
bool fast = true;
if (fast)
{
// use cheap jitter
glm::vec3 v3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
v3 = glm::normalize(v3);
segment.ray.direction += v3*jitterscale;
segment.ray.direction = glm::normalize(segment.ray.direction);
}
else
{
// use uniform spherical distribution
float u = cos(PI * (float)unitDistrib(rng));
float u2 = u*u;
float sqrt1minusu2 = sqrt(1 - u2);
float theta = 2 * PI * (float)unitDistrib(rng);
glm::vec3 v3(sqrt1minusu2 * cos(theta),
sqrt1minusu2 * sin(theta),
u);
segment.ray.direction += v3*jitterscale;
}
}
// use uniform spherical distribution
float u = cos(PI * (float)unitDistrib(rng));
float u2 = u*u;
float sqrt1minusu2 = sqrt(1 - u2);
float theta = 2 * PI * (float)unitDistrib(rng);
glm::vec3 v3(sqrt1minusu2 * cos(theta),
sqrt1minusu2 * sin(theta),
u);
v3 = glm::normalize(v3);
glm::vec3 center = cam.position + 8.0f * segment.ray.direction;
float R1 = (float)unitDistrib(rng);
float R2 = (float)unitDistrib(rng);
glm::vec3 front = glm::normalize(cam.lookAt);
glm::vec3 up = glm::normalize(cam.up);
glm::vec3 right = glm::normalize(cam.right);
glm::quat Q1;
float randangle = (float)unitDistrib(rng) * PI * dofAngle;
Q1.w = cosf(randangle / 2.0f);
Q1.x = v3.x * sinf(randangle / 2.0f);
Q1.y = v3.y * sinf(randangle / 2.0f);
Q1.z = v3.z * sinf(randangle / 2.0f);
glm::vec3 randrot = glm::rotate(Q1, segment.ray.direction);
segment.ray.origin = segment.ray.origin + segment.ray.direction * focalLength - randrot*focalLength;
segment.ray.direction = randrot;
segment.ray.direction = glm::normalize(segment.ray.direction);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
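// Worked note on the ray setup above (comments only): the pinhole direction is
//   dir = normalize(view - right * pixelLength.x * (x - 0.5 * resolution.x)
//                        - up    * pixelLength.y * (y - 0.5 * resolution.y));
// the depth-of-field step then builds quaternion Q1 for a random rotation of at
// most PI * dofAngle about a random axis and sets
//   origin'    = origin + dir * focalLength - rotate(Q1, dir) * focalLength,
//   direction' = rotate(Q1, dir),
// so the original and jittered rays still meet at the focal plane.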
// pathTraceOneBounce handles ray intersections, generates intersection records for
// shading, and scatters new rays. You might want to call scatterRay from interactions.h
__global__ void pathTraceOneBounce(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, int obj_numshapes
, int* obj_numpolyverts
, float* obj_verts
, float* obj_norms
, float* obj_texts
, int* obj_polyoffsets
, int* obj_polysidxflat
, float* obj_polysbboxes
, int polyidxcount
, int* obj_materialOffsets
, bool hasobj
, bool usebbox
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
// start polygon hits
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
for (int i = 0; i < obj_numshapes; i++)
{
objMaterialIdx = obj_materialOffsets[i];
// check bounding intersection first
float T = -1.0f;
if (usebbox)
{
T = intersectBbox(pathSegment.ray.origin,
pathSegment.ray.direction,
glm::vec3(obj_polysbboxes[i] - 0.01,
obj_polysbboxes[i + 1] - 0.01,
obj_polysbboxes[i + 2] - 0.01),
glm::vec3(obj_polysbboxes[i + 3] + 0.01,
obj_polysbboxes[i + 4] + 0.01,
obj_polysbboxes[i + 5] + 0.01));
}
else
{
T = 0;
}
if (T > -1.0f)
{
for (int j = iterator; j < iterator + obj_polyoffsets[i]; j += 3)
{
pidxo1 = 3 * obj_polysidxflat[j];
pidxo2 = 3 * obj_polysidxflat[j + 1];
pidxo3 = 3 * obj_polysidxflat[j + 2];
v1.x = obj_verts[pidxo1];
v1.y = obj_verts[pidxo1 + 1];
v1.z = obj_verts[pidxo1 + 2];
v2.x = obj_verts[pidxo2];
v2.y = obj_verts[pidxo2 + 1];
v2.z = obj_verts[pidxo2 + 2];
v3.x = obj_verts[pidxo3];
v3.y = obj_verts[pidxo3 + 1];
v3.z = obj_verts[pidxo3 + 2];
n1.x = obj_norms[pidxo1];
n1.y = obj_norms[pidxo1 + 1];
n1.z = obj_norms[pidxo1 + 2];
n2.x = obj_norms[pidxo2];
n2.y = obj_norms[pidxo2 + 1];
n2.z = obj_norms[pidxo2 + 2];
n3.x = obj_norms[pidxo3];
n3.y = obj_norms[pidxo3 + 1];
n3.z = obj_norms[pidxo3 + 2];
intersected = false;
bary.x = 0.0f; bary.y = 0.0f; bary.z = 0.0f;
intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
glm::vec3 bary2(bary.x, bary.y, 1.0 - bary.x - bary.y);
if (intersected)
{
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[i];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
iterator += obj_polyoffsets[i];
}
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
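// Note on the brute-force OBJ path above (descriptive comment only): when
// usebbox is set, intersectBbox first tests the shape's bounds padded by 0.01 so
// whole shapes can be rejected before their triangle lists are walked, and every
// confirmed triangle hit is nudged along its normal by 1e-4 before t is measured
// to avoid self-intersection on the next bounce.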
// pathTraceOneBounceKDfix handles ray intersections by walking the KD-tree of full
// KDnode/Triangle structs, generates intersection records for shading, and scatters new rays via scatterRay from interactions.h
__global__ void pathTraceOneBounceKDfix(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::Triangle* triangles
, int numTriangles
, KDN::KDnode* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
if (numNodes != 0)
{
bool nodeIDs[100] = { false };
int currID = nodeIDs[nodes[0].ID];
float dist = -1.0;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
KDN::KDnode* node = &(nodes[currID]);
bool hitGeom = false;
float boxdist = -1.0f;
bary.z = FLT_MAX;
while (true)
{
if (currID == -1)
break;
node = &(nodes[currID]);
// check if it intersects the bounds
if (nodeIDs[currID] == true)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
continue;
}
else
{
hitGeom = intersectAABB(pathSegment.ray, node->bbox, dist);
if (hitGeom == false && node->parentID == -1)
break;
}
if (hitGeom == false && dist > bary.z)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
}
else
{
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::Triangle* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction * bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
}
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
__host__ __device__
void traverseKDbare(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
if (numNodes != 0)
{
bool nodeIDs[STACK_SIZE] = { false };
int currID = nodeIDs[nodes[0].ID];
float dist = -1.0;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
KDN::NodeBare* node = &(nodes[currID]);
bool hitGeom = false;
float boxdist = -1.0f;
bary.z = FLT_MAX;
while (true)
{
if (currID == -1)
break;
node = &(nodes[currID]);
// check if it intersects the bounds
if (hitGeom == false && node->parentID == -1 && nodeIDs[node->ID] == true)
break;
hitGeom = intersectAABBarrays(pathSegment.ray, nodes[currID].mins, nodes[currID].maxs, dist);
if (nodeIDs[currID] == true)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
continue;
}
else
{
// if we reached the top and didn't hit anything
if (hitGeom == false && node->parentID == -1)
break;
}
// if the distance is greater than the last poly hit
if (hitGeom == false || dist > bary.z)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
}
else
{
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.00001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
}
}
}
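// Traversal summary for traverseKDbare (descriptive comment only): the tree is
// walked without an explicit stack by following leftID/rightID downwards and
// parentID upwards, with nodeIDs[] marking subtrees that are already finished.
// A node whose AABB test fails, or whose box distance lies beyond the closest
// triangle hit so far (dist > bary.z), is marked together with its children and
// the walk climbs back to its parent; leaf nodes test their triangle range
// [triIdStart, triIdStart + triIdSize).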
__host__ __device__
void traverseKDbareShortHybrid(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
if (numNodes != 0)
{
bool nodeIDs[STACK_SIZE] = { false };
int currID = nodeIDs[nodes[0].ID];
float dist = -1.0;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
KDN::NodeBare* node = &(nodes[currID]);
int axis;
float tSplit;
bool hitGeom = false;
float boxdist = -1.0f;
bary.z = FLT_MAX;
while (true)
{
if (currID == -1)
break;
node = &(nodes[currID]);
// check if it intersects the bounds
if (hitGeom == false && node->parentID == -1 && nodeIDs[node->ID] == true)
break;
hitGeom = intersectAABBarrays(pathSegment.ray, nodes[currID].mins, nodes[currID].maxs, dist);
if (nodeIDs[currID] == true)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
continue;
}
else
{
// if we reached the top and didn't hit anything
if (hitGeom == false && node->parentID == -1)
break;
}
// if the distance is greater than the last poly hit
if (hitGeom == false || dist > bary.z)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
}
else
{
axis = node->axis;
if (pathSegment.ray.direction[axis] > 0.0f)
{
// left side first
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
// skip other side
nodeIDs[nodes[nodeIDs[node->parentID]].rightID] = true;
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
else
{
// right side first
if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
// skip other side
nodeIDs[nodes[nodeIDs[node->parentID]].leftID] = true;
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
}
}
}
}
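// Ordering note for traverseKDbareShortHybrid (descriptive comment only): it is
// the same parent-link walk as traverseKDbare, except that the sign of the ray
// direction along the node's split axis picks which child is descended first
// (left first when direction[axis] > 0, right first otherwise), and once a leaf
// on the near side records a hit the sibling on the far side is marked visited
// so it is skipped on the way back up.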
__host__ __device__
void traverseKDshort(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
NodeStack stack[STACK_SIZE];
int top = -1;
KDN::NodeBare* node;
KDN::NodeBare* root;
KDN::NodeBare* first;
KDN::NodeBare* second;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
node = &(nodes[i]);
root = &(nodes[i]);
break;
}
}
float tMin, tMax, tHit, sceneMax;
tMin = tMax = 0.0f;
tHit = t_min;
sceneMax = FLT_MAX;
bool pushdown = false;
int axis = 0;
float tSplit = 0.0f;
float dist = 0.0f;
bool bboxintersect = false;
while (tMax < sceneMax)
{
if (top == -1)
{
node = root;
tMin = tMax;
tMax = sceneMax;
pushdown = true;
}
else
{
node = stack[top].node;
tMin = node->tmin;
tMax = node->tmax;
top--;
pushdown = false;
}
while (node->triIdSize != 0)
{
axis = node->axis;
tSplit = (node->splitPos - pathSegment.ray.origin[axis]) / pathSegment.ray.direction[axis];
if (pathSegment.ray.direction[axis] > 0.0f)
{
if (nodes[node->leftID].mins[axis] < nodes[node->rightID].mins[axis])
{
first = &(nodes[node->leftID]);
second = &(nodes[node->rightID]);
}
else
{
first = &nodes[node->rightID];
second = &nodes[node->leftID];
}
}
else
{
if (nodes[node->leftID].maxs[axis] > nodes[node->rightID].maxs[axis])
{
first = &(nodes[node->leftID]);
second = &(nodes[node->rightID]);
}
else
{
first = &(nodes[node->rightID]);
second = &(nodes[node->leftID]);
}
}
if (tSplit >= tMax || tSplit < 0.0f)
node = first;
else if (tSplit <= tMin)
node = second;
else
{
second->tmin = tSplit;
second->tmax = tMax;
top++;
if (top <= 199) // conservative depth guard; stack[] itself has STACK_SIZE entries
{
stack[top].node = second;
stack[top].tmin = tSplit;
stack[top].tmax = tMax;
}
else
{
break;
}
node = first;
tMax = tSplit;
pushdown = false;
}
if (pushdown)
root = node;
bboxintersect = intersectAABBarrays(pathSegment.ray, node->mins, node->maxs, dist);
if (bboxintersect)
{
int start = node->triIdStart;
int end = start + node->triIdSize;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = -glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f || t_min > t)
{
tHit = min(tHit, t);
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
}
}
__host__ __device__
void traverseKD(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
NodeStack stack[STACK_SIZE];
int top = -1;
KDN::NodeBare* node;
KDN::NodeBare* root;
KDN::NodeBare* first;
KDN::NodeBare* second;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
node = &(nodes[i]);
root = &(nodes[i]);
break;
}
}
glm::vec3 origin = pathSegment.ray.origin;
glm::vec3 invDirection(1.0f / pathSegment.ray.direction[0],
1.0f / pathSegment.ray.direction[1],
1.0f / pathSegment.ray.direction[2]);
float tmax = FLT_MAX;
float tClosestIntersection = t_min;
bool notFullyTraversed = true;
while (notFullyTraversed)
{
if (node->triIdSize != 0)
{
//test all primitives inside the leaf
float dist = 0.0f;
bool bboxintersect = intersectAABBarrays(pathSegment.ray, node->mins, node->maxs, dist);
if (bboxintersect)
{
int start = node->triIdStart;
int end = start + node->triIdSize;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = -glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
//return;
}
}
}
}
//test if leaf + empty stack => return
if (top == -1)
{
notFullyTraversed = false;
}
else
{
//pop the next deferred node from the stack
origin = stack[top].origin;
tmax = stack[top].tmax;
node = stack[top].node;
top--;
}
}
else
{
//get axis of node and its split plane
const int axis = node->axis;
const float plane = node->splitPos;
//test if ray is not parallel to plane
if ((fabs(pathSegment.ray.direction[axis]) > EPSILON))
{
const float t = (plane - origin[axis]) * invDirection[axis];
//case of the ray intersecting the plane, then test both childs
if (0.0f < t && t < tmax)
{
//traverse near first, then far. Set tmax = t for near
//push only far child onto stack
top++;
stack[top].origin[0] = origin[0] + pathSegment.ray.direction[0] * t;
stack[top].origin[1] = origin[1] + pathSegment.ray.direction[1] * t;
stack[top].origin[2] = origin[2] + pathSegment.ray.direction[2] * t;
stack[top].node = (origin[axis] > plane) ? &(nodes[node->leftID]) : &(nodes[node->rightID]);
stack[top].tmax = tmax - t;
tmax = t;
}
}
//in every case: traverse near child first
node = (origin[axis] > plane) ? &(nodes[node->rightID]) : &(nodes[node->leftID]);
}
}
}
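// Split-plane arithmetic used by traverseKDshort and traverseKD above (comments
// only): for an inner node with split axis a and plane position p, the crossing
// parameter is tSplit = (p - origin[a]) / direction[a]; traverseKD precomputes
// invDirection and multiplies instead. When tSplit falls outside the node's
// current [tMin, tMax] slab only one child needs to be visited; otherwise the
// near child is traversed first with tMax clamped to tSplit and the other child
// is pushed onto the NodeStack to be resumed later.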
// pathTraceOneBounceKDbare handles ray intersections by walking the compact KD-tree
// (NodeBare/TriBare structs), generates intersection records for shading, and scatters new rays via scatterRay from interactions.h
__global__ void pathTraceOneBounceKDbare(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::TriBare* triangles
, int numTriangles
, KDN::NodeBare* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
, bool shortstack
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
if (shortstack == false)
{
// standard traversal
traverseKDbare(nodes, numNodes, t,
pathSegment, triangles,
bary, objMaterialIdx,
material_size, hit,
norm, t_min,
hit_geom_index, intersect_point,
normal, tmp_intersect,
tmp_normal, obj_intersect,
intersections, obj_materialOffsets,
path_index);
}
else
{
// optimized short-stack traversal
traverseKDbareShortHybrid(nodes, numNodes, t,
pathSegment, triangles,
bary, objMaterialIdx,
material_size, hit,
norm, t_min,
hit_geom_index, intersect_point,
normal, tmp_intersect,
tmp_normal, obj_intersect,
intersections, obj_materialOffsets,
path_index);
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKDbareBoxes handles ray intersections against the KD nodes' bounding
// boxes only, generates intersection records for shading, and scatters new rays via scatterRay from interactions.h
__global__ void pathTraceOneBounceKDbareBoxes(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::TriBare* triangles
, int numTriangles
, KDN::NodeBare* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
for (int i = 0; i < numNodes; i++)
{
t = boxIntersectionTestBox(nodes[i].mins, nodes[i].maxs, pathSegment.ray, tmp_intersect, tmp_normal, outside);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = geoms_size;
intersect_point = tmp_intersect;
normal = tmp_normal;
obj_intersect = true;
objMaterialIdx = material_size - 1;
}
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKDbareShortStack handles ray intersections, generates intersections
// for shading, and scatters new rays, using the short-stack KD traversal (traverseKDshort).
// You might want to call scatterRay from interactions.h
__global__ void pathTraceOneBounceKDbareShortStack(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::TriBare* triangles
, int numTriangles
, KDN::NodeBare* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t = 0.0;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
// KD traversal standard short stack
traverseKDshort(nodes, numNodes, t,
pathSegment, triangles,
bary, objMaterialIdx,
material_size, hit,
norm, t_min,
hit_geom_index, intersect_point,
normal, tmp_intersect,
tmp_normal, obj_intersect,
intersections, obj_materialOffsets,
path_index);
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKD handles ray intersections, generates intersections for shading,
// and scatters new rays. This variant walks the pointer-style KDnode tree in place.
__global__ void pathTraceOneBounceKD(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::Triangle* triangles
, int numTriangles
, KDN::KDnode* nodes
, int numNodes
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
// KDTREE TRAVERSAL
float dist = -1.0f;
glm::vec3 norm;
dist = -1.0f;
norm = glm::vec3(0.0f);
bool hitGeom = false;
Ray r = pathSegment.ray;
// USE AN ARRAY OF 0 NODE IDS AND SET THEM TO 1 once they're visited
// instead of using visited to avoid conflicts when reading from
// multiple threads
bool nodeIDs[1000] = { false };
if (numNodes != 0)
{
float mindist = FLT_MAX;
                int currID = -1;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
float boxdist = -1.0f;
while (true)
{
if (currID == -1)
break;
// check if it intersects the bounds
//printf("1\n");
hitGeom = intersectAABB(r, nodes[currID].bbox, dist);
//printf("2\n");
if (hitGeom == false)
{
nodeIDs[nodes[currID].ID] = true;
currID = nodes[currID].parentID;
}
else
{
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = nodes[currID].leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = nodes[currID].rightID;
else if (nodeIDs[nodes[currID].ID] == false)
{
//std::cout << "NODE LOOP: " << nodes[currID].ID << " PARENT: " << nodes[currID].parentID << std::endl;
nodeIDs[nodes[currID].ID] = true;
int size = nodes[currID].triIdSize;
if (size > 0)
{
int start = nodes[currID].triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
//KDN::Triangle t = triangles[i];
glm::vec3 v1(triangles[i].x1, triangles[i].y1, triangles[i].z1);
glm::vec3 v2(triangles[i].x2, triangles[i].y2, triangles[i].z2);
glm::vec3 v3(triangles[i].x3, triangles[i].y3, triangles[i].z3);
glm::vec3 barytemp(0.0f, 0.0f, 0.0f);
bool intersected = glm::intersectRayTriangle(r.origin,
r.direction,
v1, v2, v3, barytemp);
if (intersected && barytemp.z < mindist)
{
glm::vec3 bary(barytemp.x, barytemp.y, 1.0 - barytemp.x - barytemp.y);
glm::vec3 n1(triangles[i].nx1, triangles[i].ny1, triangles[i].nz1);
glm::vec3 n2(triangles[i].nx2, triangles[i].ny2, triangles[i].nz2);
glm::vec3 n3(triangles[i].nx3, triangles[i].ny3, triangles[i].nz3);
norm = (bary[0] * n1 + bary[1] * n2 + bary[2] * n3);
dist = barytemp.z;
mindist = dist;
//glm::vec3 pos = r.origin + r.direction * dist;
glm::vec3 intersect = r.origin + r.direction*dist;
//printf("KDLOOPPTR INTERSECT POINT: P: [%f %f %f] NODEID: %d\n", intersect.x,
// intersect.y,
// intersect.z,
// currID);
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
//norm(glm::normalize(n1));
//intersect += norm*0.0001f;
t = dist;
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = 0;// obj_materialOffsets[i];
intersect_point = intersect;
tmp_intersect = intersect;
tmp_normal = norm;//glm::vec3(0.0f, 1.0f, 0.0f);
intersections[path_index].t = t;
}
}
}
}
}
else
currID = nodes[currID].parentID;
}
}
}
if (hit_geom_index != -1)
{
hit_geom_index = 0;
obj_intersect = true;
t_min = dist;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
objMaterialIdx = 0;
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[0].materialid;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
__global__ void shadeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, bool enablesss
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
//idx = pathSegments[idx].initialidx;
//idx = pathSegments[idx].pixelIndex;
if (pathSegments[idx].remainingBounces>0)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f)
{ // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
                // Otherwise, accumulate this bounce's contribution into the path color,
                // weighted by the material color plus any specular or transmittance terms.
else {
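                    // Subsurface scattering approximation: clamp the distance the ray
                    // traveled inside the medium (ray.sdepth) to [0,1], square it, and use
                    // it to weight the transmittance tint added to the material color.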
if (enablesss && (material.transmittance.x > 0.0f || material.transmittance.y > 0.0f || material.transmittance.z > 0.0f))
{
float scenescale = 1.0f;
                        float sss = (scenescale * pathSegments[idx].ray.sdepth > 1.0f) ? 1.0f : pathSegments[idx].ray.sdepth;
                        sss = (1.0f - sss < 0.0f) ? 0.0f : sss;
sss = glm::pow(sss, 2);
pathSegments[idx].color *= (materialColor)* 1.0f + material.hasRefractive * material.specular.color + sss * material.transmittance;
}
else if (material.hasRefractive > 0.0f)
{
pathSegments[idx].color *= (materialColor)* 1.0f + material.hasRefractive * material.specular.color;
}
else if (material.hasReflective > 0.0f)
{
pathSegments[idx].color *= (materialColor)* 1.0f + material.hasReflective * material.specular.color;
}
else
{
pathSegments[idx].color *= (materialColor) * 1.0f;
}
pathSegments[idx].remainingBounces--;
}
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
index = iterationPaths[index].pixelIndex;
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
// Add the colors of paths that have just terminated to the image (partial gather used with stream compaction)
__global__ void partialGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
//index = iterationPaths[index].pixelIndex;
if (iterationPaths[index].remainingBounces == 0)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo,
int frame,
int iter,
float focalLength,
float dofAngle,
bool cacherays,
bool antialias,
float softness,
bool enableSss,
bool testingmode,
bool compaction,
bool enablekd,
bool vizkd,
bool usebbox,
bool shortstack) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 32;
///////////////////////////////////////////////////////////////////////////
// perform one iteration of path tracing
hipEvent_t startGenRayFromCam, stopGenRayFromCam;
hipEvent_t startPathTraceOneBounce, stopPathTraceOneBounce;
hipEvent_t startShadeMaterial, stopShadeMaterial;
float millisecondsGenRayFromCam = 0.0f;
float millisecondsPathTraceOneBounce = 0.0f;
float millisecondsShadeMaterial = 0.0f;
float ms1 = 0.0;
float ms2 = 0.0;
float ms3 = 0.0;
// cache rays
if (cacherays)
{
if (iter == 1)
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths_cache, focalLength, dofAngle, antialias);
checkCUDAError("generate camera ray");
}
hipMemcpy(dev_paths, dev_paths_cache, pixelcount*sizeof(PathSegment), hipMemcpyDeviceToDevice);
}
else
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths, focalLength, dofAngle, antialias);
checkCUDAError("generate camera ray");
}
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int num_paths_temp = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
if (testingmode)
{
hipEventCreate(&startPathTraceOneBounce); hipEventCreate(&stopPathTraceOneBounce); hipEventRecord(startPathTraceOneBounce);
}
if (enablekd == false)
{
pathTraceOneBounce << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, iter
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_materials
, hst_scene->materials.size()
, dev_intersections
, softness
, hst_scene->obj_numshapes
, obj_numpolyverts
, obj_verts
, obj_norms
, obj_texts
, obj_polyoffsets
, obj_polysidxflat
, obj_polysbboxes
, hst_scene->polyidxcount
, obj_materialOffsets
, hst_scene->hasObj
, usebbox);
checkCUDAError("trace one bounce");
//printf("numNodes = %d\n", hst_scene->numNodes);
//printf("numTriangles = %d\n", hst_scene->numTriangles);
}
else
{
if (vizkd)
{
pathTraceOneBounceKDbareBoxes << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, iter
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_materials
, hst_scene->materials.size()
, dev_intersections
, softness
, kd_trianglesBare
, hst_scene->numTriangles
, kd_nodesBare
, hst_scene->numNodes
, obj_materialOffsets
, hst_scene->hasObj);
checkCUDAError("trace one bounce kd");
//hipEventQuery(0);
}
else
{
pathTraceOneBounceKDbare << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, iter
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_materials
, hst_scene->materials.size()
, dev_intersections
, softness
, kd_trianglesBare
, hst_scene->numTriangles
, kd_nodesBare
, hst_scene->numNodes
, obj_materialOffsets
, hst_scene->hasObj
, shortstack);
checkCUDAError("trace one bounce kd");
//hipEventQuery(0);
}
}
hipDeviceSynchronize();
depth++;
if (testingmode)
{
hipEventRecord(stopPathTraceOneBounce); hipEventSynchronize(stopPathTraceOneBounce);
ms2 = 0;
hipEventElapsedTime(&ms2, startPathTraceOneBounce, stopPathTraceOneBounce);
millisecondsPathTraceOneBounce += ms2;
hipEventDestroy(startPathTraceOneBounce);
hipEventDestroy(stopPathTraceOneBounce);
}
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
enableSss
);
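        // With compaction enabled, gather terminated paths into the image now, then
        // remove them so later bounces launch threads only for surviving paths.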
if (compaction)
{
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
partialGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
}
if (compaction)
{
thrust::device_ptr<PathSegment> thrust_paths(dev_paths);
thrust::device_ptr<PathSegment> P = thrust::remove_if(thrust_paths, thrust_paths + num_paths, is_zero_bounce());
num_paths_temp = P - thrust_paths;
num_paths = num_paths_temp;
}
        // on the second iteration (iter == 2), sort paths and intersections by material id for shading coherence
if (iter == 2)
{
thrust::device_ptr<PathSegment> thrust_paths2(dev_paths);
thrust::sort(thrust_paths2, thrust_paths2 + num_paths);
thrust::device_ptr<ShadeableIntersection> thrust_intersections(dev_intersections);
thrust::sort(thrust_intersections, thrust_intersections + num_paths);
}
        // stop when no paths remain, or once depth exceeds 7 when testing without compaction
if (num_paths <= 0 || depth > 7)
iterationComplete = true;
}
if (testingmode)
{
printf(" pathTrace time = %f\n", millisecondsPathTraceOneBounce);
//printf("%f,\n", millisecondsPathTraceOneBounce);
}
if (!compaction)
{
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
}
///////////////////////////////////////////////////////////////////////////
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
8b55bc6a45970aa7ccb89308ad337213d2f94cf4.cu
|
#include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include <glm/gtc/matrix_inverse.hpp>
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include <algorithm>
#include <stdlib.h>
#include <random>
#include <vector>
#include <stack>
#include <string>
#include <fstream>
#include <iostream>
#include <iomanip>
#include <glm/gtx/intersect.hpp>
#include "KDnode.h"
#include "KDtree.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
        // Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static PathSegment * dev_paths_cache = NULL;
static ShadeableIntersection * dev_intersections = NULL;
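// Fixed size of the per-ray visited-flag / node stack arrays used by the KD traversals.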
static const int STACK_SIZE = 4000;
struct is_zero_bounce
{
__host__ __device__
bool operator()(const PathSegment p)
{
return (p.remainingBounces == 0);
}
};
struct by_material_id
{
const PathSegment a;
by_material_id(PathSegment _a) : a(_a) {}
__host__ __device__
int operator()(const PathSegment& x, const PathSegment& y) const
{
return x.color.r + y.color.r;
}
};
__host__ __device__ bool operator<(const PathSegment &lhs, const PathSegment &rhs)
{
return lhs.materialIdHit < rhs.materialIdHit;
}
__host__ __device__ bool operator<(const ShadeableIntersection &lhs, const ShadeableIntersection &rhs)
{
return lhs.materialId < rhs.materialId;
}
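// Entry for the explicit stacks used by the stack-based KD-tree traversals: the node to
// resume plus the ray parameter interval [tmin, tmax] and the ray origin at entry.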
struct NodeStack{
KDN::NodeBare* node;
float tmin;
float tmax;
glm::vec3 origin;
};
// ------------------------------------------------------------------------
// --------------------------- KD TREE UTILITIES --------------------------
// ------------------------------------------------------------------------
std::vector<KDN::Triangle*> getTrianglesFromFile(const char* path)
{
std::vector<KDN::Triangle*>triangles;
string line;
ifstream file(path);
if (file.is_open())
{
while (getline(file, line))
{
float x1 = atof(line.c_str());
getline(file, line); float y1 = atof(line.c_str());
getline(file, line); float z1 = atof(line.c_str());
getline(file, line); float x2 = atof(line.c_str());
getline(file, line); float y2 = atof(line.c_str());
getline(file, line); float z2 = atof(line.c_str());
getline(file, line); float x3 = atof(line.c_str());
getline(file, line); float y3 = atof(line.c_str());
getline(file, line); float z3 = atof(line.c_str());
KDN::Triangle* t = new KDN::Triangle(x1, y1, z1,
x2, y2, z2,
x3, y3, z3);
triangles.push_back(t);
}
}
return triangles;
}
// ------------------------------------------------------------------------
// --------------------------- END KD TREE UTILITIES ----------------------
// ------------------------------------------------------------------------
int obj_numshapes = 0;
int* obj_numpolyverts = NULL;
float* obj_verts = NULL;
float* obj_norms = NULL;
float* obj_texts = NULL;
int* obj_polyoffsets = NULL;
int* obj_polysidxflat = NULL;
float* obj_polysbboxes = NULL;
int* obj_materialOffsets = NULL;
// KD DATA
//KDN::KDnode* kd_nodes = NULL;
//KDN::Triangle* kd_triangles = NULL;
KDN::NodeBare* kd_nodesBare = NULL;
KDN::TriBare* kd_trianglesBare = NULL;
static int numNodes = 0;
static int numTriangles = 0;
void pathtraceInit(Scene *scene, bool enablekd) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_paths_cache, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// objloader part
if (scene->hasObj)
{
if (enablekd == false)
{
cudaMalloc((void**)&obj_numpolyverts, scene->obj_numshapes * sizeof(int));
cudaMalloc((void**)&obj_polyoffsets, scene->obj_numshapes * sizeof(int));
cudaMalloc((void**)&obj_polysidxflat, scene->polyidxcount * sizeof(int));
cudaMalloc((void**)&obj_verts, scene->objmesh->attrib.vertices.size()* sizeof(float));
cudaMalloc((void**)&obj_norms, scene->objmesh->attrib.normals.size()* sizeof(float));
cudaMalloc((void**)&obj_texts, scene->objmesh->attrib.texcoords.size()* sizeof(float));
cudaMalloc((void**)&obj_polysbboxes, scene->obj_numshapes * 6 * sizeof(float));
}
cudaMalloc((void**)&obj_materialOffsets, scene->obj_numshapes * sizeof(int));
// ------------------------------------------------------------------
// KD DATA PART
// ------------------------------------------------------------------
if (enablekd == true)
{
cudaMalloc((void**)&kd_nodesBare, scene->numNodes * sizeof(KDN::NodeBare));
cudaMalloc((void**)&kd_trianglesBare, scene->numTriangles * sizeof(KDN::TriBare));
cudaMemcpy(kd_nodesBare, scene->newNodesBare, scene->numNodes * sizeof(KDN::NodeBare), cudaMemcpyHostToDevice);
cudaMemcpy(kd_trianglesBare, scene->newTrianglesBare, scene->numTriangles * sizeof(KDN::TriBare), cudaMemcpyHostToDevice);
}
else
{
cudaMemcpy(obj_numpolyverts, scene->obj_numpolyverts, scene->obj_numshapes * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(obj_polyoffsets, scene->obj_polyoffsets, scene->obj_numshapes * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(obj_polysidxflat, scene->obj_polysidxflat, scene->polyidxcount * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(obj_verts, scene->obj_verts, scene->objmesh->attrib.vertices.size()* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(obj_norms, scene->obj_norms, scene->objmesh->attrib.normals.size()* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(obj_texts, scene->obj_texts, scene->objmesh->attrib.texcoords.size()* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(obj_polysbboxes, scene->obj_bboxes, scene->obj_numshapes * 6 * sizeof(float), cudaMemcpyHostToDevice);
}
cudaMemcpy(obj_materialOffsets, scene->obj_materialOffsets, scene->obj_numshapes * sizeof(int), cudaMemcpyHostToDevice);
}
/*
// compare the compressed sizes
printf("sizeof KDnode: %d\n", sizeof(KDN::KDnode));
printf("sizeof KDtriangle: %d\n", sizeof(KDN::Triangle));
printf("sizeof NodeBare: %d\n", sizeof(KDN::NodeBare));
printf("sizeof TriBare: %d\n", sizeof(KDN::TriBare));
// sizeof KDnode: 136
// sizeof KDtriangle: 116
// sizeof NodeBare: 64
// sizeof TriBare: 76
*/
checkCUDAError("pathtraceInit");
}
void pathtraceFree(Scene *scene, bool enablekd) {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_paths_cache);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// objloader part
if (scene->hasObj)
{
if (enablekd == false)
{
cudaFree(obj_numpolyverts);
cudaFree(obj_polyoffsets);
cudaFree(obj_polysidxflat);
cudaFree(obj_verts);
cudaFree(obj_norms);
cudaFree(obj_texts);
}
cudaFree(obj_materialOffsets);
if (enablekd == true)
{
cudaFree(kd_nodesBare);
cudaFree(kd_trianglesBare);
}
}
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, float focalLength, float dofAngle, bool antialias)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.ray.isinside = false;
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
// antialiasing by jittering the ray
thrust::default_random_engine rng(utilhash(iter));
thrust::uniform_real_distribution<float> unitDistrib(0, 1);
if (antialias)
{
float jitterscale = 0.002;
bool fast = true;
if (fast)
{
// use cheap jitter
glm::vec3 v3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
v3 = glm::normalize(v3);
segment.ray.direction += v3*jitterscale;
segment.ray.direction = glm::normalize(segment.ray.direction);
}
else
{
// use uniform spherical distribution
float u = cos(PI * (float)unitDistrib(rng));
float u2 = u*u;
float sqrt1minusu2 = sqrt(1 - u2);
float theta = 2 * PI * (float)unitDistrib(rng);
glm::vec3 v3(sqrt1minusu2 * cos(theta),
sqrt1minusu2 * sin(theta),
u);
segment.ray.direction += v3*jitterscale;
}
}
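        // Depth of field: build a random unit axis, rotate the ray direction about it by a
        // random angle scaled by dofAngle, and move the origin so the rotated ray still
        // passes through the focal point at focalLength along the original direction.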
// use uniform spherical distribution
float u = cos(PI * (float)unitDistrib(rng));
float u2 = u*u;
float sqrt1minusu2 = sqrt(1 - u2);
float theta = 2 * PI * (float)unitDistrib(rng);
glm::vec3 v3(sqrt1minusu2 * cos(theta),
sqrt1minusu2 * sin(theta),
u);
v3 = glm::normalize(v3);
glm::vec3 center = cam.position + 8.0f * segment.ray.direction;
float R1 = (float)unitDistrib(rng);
float R2 = (float)unitDistrib(rng);
glm::vec3 front = glm::normalize(cam.lookAt);
glm::vec3 up = glm::normalize(cam.up);
glm::vec3 right = glm::normalize(cam.right);
glm::quat Q1;
float randangle = (float)unitDistrib(rng) * PI * dofAngle;
Q1.w = cosf(randangle / 2.0f);
Q1.x = v3.x * sinf(randangle / 2.0f);
Q1.y = v3.y * sinf(randangle / 2.0f);
Q1.z = v3.z * sinf(randangle / 2.0f);
glm::vec3 randrot = glm::rotate(Q1, segment.ray.direction);
segment.ray.origin = segment.ray.origin + segment.ray.direction * focalLength - randrot*focalLength;
segment.ray.direction = randrot;
segment.ray.direction = glm::normalize(segment.ray.direction);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// pathTraceOneBounce handles ray intersections, generates intersections for shading,
// and scatters new rays. You might want to call scatterRay from interactions.h
__global__ void pathTraceOneBounce(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, int obj_numshapes
, int* obj_numpolyverts
, float* obj_verts
, float* obj_norms
, float* obj_texts
, int* obj_polyoffsets
, int* obj_polysidxflat
, float* obj_polysbboxes
, int polyidxcount
, int* obj_materialOffsets
, bool hasobj
, bool usebbox
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
// start polygon hits
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
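                // The OBJ mesh is flattened into plain arrays: obj_polyoffsets[i] holds the
                // index count for shape i, obj_polysidxflat stores all vertex indices back to
                // back, and obj_verts / obj_norms are xyz-interleaved vertex and normal data.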
for (int i = 0; i < obj_numshapes; i++)
{
objMaterialIdx = obj_materialOffsets[i];
// check bounding intersection first
float T = -1.0f;
if (usebbox)
{
T = intersectBbox(pathSegment.ray.origin,
pathSegment.ray.direction,
glm::vec3(obj_polysbboxes[i] - 0.01,
obj_polysbboxes[i + 1] - 0.01,
obj_polysbboxes[i + 2] - 0.01),
glm::vec3(obj_polysbboxes[i + 3] + 0.01,
obj_polysbboxes[i + 4] + 0.01,
obj_polysbboxes[i + 5] + 0.01));
}
else
{
T = 0;
}
if (T > -1.0f)
{
for (int j = iterator; j < iterator + obj_polyoffsets[i]; j += 3)
{
pidxo1 = 3 * obj_polysidxflat[j];
pidxo2 = 3 * obj_polysidxflat[j + 1];
pidxo3 = 3 * obj_polysidxflat[j + 2];
v1.x = obj_verts[pidxo1];
v1.y = obj_verts[pidxo1 + 1];
v1.z = obj_verts[pidxo1 + 2];
v2.x = obj_verts[pidxo2];
v2.y = obj_verts[pidxo2 + 1];
v2.z = obj_verts[pidxo2 + 2];
v3.x = obj_verts[pidxo3];
v3.y = obj_verts[pidxo3 + 1];
v3.z = obj_verts[pidxo3 + 2];
n1.x = obj_norms[pidxo1];
n1.y = obj_norms[pidxo1 + 1];
n1.z = obj_norms[pidxo1 + 2];
n2.x = obj_norms[pidxo2];
n2.y = obj_norms[pidxo2 + 1];
n2.z = obj_norms[pidxo2 + 2];
n3.x = obj_norms[pidxo3];
n3.y = obj_norms[pidxo3 + 1];
n3.z = obj_norms[pidxo3 + 2];
intersected = false;
bary.x = 0.0f; bary.y = 0.0f; bary.z = 0.0f;
intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
glm::vec3 bary2(bary.x, bary.y, 1.0 - bary.x - bary.y);
if (intersected)
{
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[i];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
iterator += obj_polyoffsets[i];
}
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKDfix handles ray intersections, generates intersections for shading,
// and scatters new rays, traversing the pointer-style KDnode tree in place. You might want
// to call scatterRay from interactions.h
__global__ void pathTraceOneBounceKDfix(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::Triangle* triangles
, int numTriangles
, KDN::KDnode* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
if (numNodes != 0)
{
bool nodeIDs[100] = { false };
                    int currID = -1;
float dist = -1.0;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
KDN::KDnode* node = &(nodes[currID]);
bool hitGeom = false;
float boxdist = -1.0f;
bary.z = FLT_MAX;
while (true)
{
if (currID == -1)
break;
node = &(nodes[currID]);
// check if it intersects the bounds
if (nodeIDs[currID] == true)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
continue;
}
else
{
hitGeom = intersectAABB(pathSegment.ray, node->bbox, dist);
if (hitGeom == false && node->parentID == -1)
break;
}
if (hitGeom == false && dist > bary.z)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
}
else
{
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::Triangle* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;// (bary2.x * v1 + bary2.y * v2 + bary2.z * v3);
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
}
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
__host__ __device__
void traverseKDbare(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
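    // Stackless parent-link traversal: walk the tree via parentID/leftID/rightID while a
    // per-ray visited-flag array (nodeIDs) prevents revisiting subtrees; leaves reference a
    // [triIdStart, triIdStart + triIdSize) range of the flat triangle array to intersect.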
if (numNodes != 0)
{
bool nodeIDs[STACK_SIZE] = { false };
        int currID = -1;
float dist = -1.0;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
KDN::NodeBare* node = &(nodes[currID]);
bool hitGeom = false;
float boxdist = -1.0f;
bary.z = FLT_MAX;
while (true)
{
if (currID == -1)
break;
node = &(nodes[currID]);
// check if it intersects the bounds
if (hitGeom == false && node->parentID == -1 && nodeIDs[node->ID] == true)
break;
hitGeom = intersectAABBarrays(pathSegment.ray, nodes[currID].mins, nodes[currID].maxs, dist);
if (nodeIDs[currID] == true)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
continue;
}
else
{
// if we reached the top and didn't hit anything
if (hitGeom == false && node->parentID == -1)
break;
}
// if the distance is greater than the last poly hit
if (hitGeom == false || dist > bary.z)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
}
else
{
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.00001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
}
}
}
__host__ __device__
void traverseKDbareShortHybrid(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
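    // Parent-link traversal like traverseKDbare, but the node's split axis and the ray
    // direction choose which child to descend first, so the near subtree is tested before
    // the far one and the far sibling can be flagged as visited after a triangle hit.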
if (numNodes != 0)
{
bool nodeIDs[STACK_SIZE] = { false };
        int currID = -1;
float dist = -1.0;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
KDN::NodeBare* node = &(nodes[currID]);
int axis;
float tSplit;
bool hitGeom = false;
float boxdist = -1.0f;
bary.z = FLT_MAX;
while (true)
{
if (currID == -1)
break;
node = &(nodes[currID]);
// check if it intersects the bounds
if (hitGeom == false && node->parentID == -1 && nodeIDs[node->ID] == true)
break;
hitGeom = intersectAABBarrays(pathSegment.ray, nodes[currID].mins, nodes[currID].maxs, dist);
if (nodeIDs[currID] == true)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
continue;
}
else
{
// if we reached the top and didn't hit anything
if (hitGeom == false && node->parentID == -1)
break;
}
// if the distance is greater than the last poly hit
if (hitGeom == false || dist > bary.z)
{
nodeIDs[node->ID] = true;
nodeIDs[node->leftID] = true;
nodeIDs[node->rightID] = true;
currID = node->parentID;
}
else
{
axis = node->axis;
if (pathSegment.ray.direction[axis] > 0.0f)
{
// left side first
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
                                    // skip the sibling subtree on the other side of the parent
                                    if (node->parentID != -1)
                                        nodeIDs[nodes[node->parentID].rightID] = true;
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
else
{
// right side first
if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = node->rightID;
else if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = node->leftID;
else if (nodeIDs[node->ID] == false)
{
nodeIDs[node->ID] = true;
int size = node->triIdSize;
if (size > 0)
{
int start = node->triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
                                    // skip the sibling subtree on the other side of the parent
                                    if (node->parentID != -1)
                                        nodeIDs[nodes[node->parentID].leftID] = true;
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
else
currID = node->parentID;
}
}
}
}
}
__host__ __device__
void traverseKDshort(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
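    // Short-stack traversal: restart from the root whenever the stack is empty, clip the
    // ray to [tMin, tMax] against each split plane, descend into the near child, and push
    // the far child (with its interval) onto a small explicit stack for later.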
NodeStack stack[STACK_SIZE];
int top = -1;
KDN::NodeBare* node;
KDN::NodeBare* root;
KDN::NodeBare* first;
KDN::NodeBare* second;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
node = &(nodes[i]);
root = &(nodes[i]);
break;
}
}
float tMin, tMax, tHit, sceneMax;
tMin = tMax = 0.0f;
tHit = t_min;
sceneMax = FLT_MAX;
bool pushdown = false;
int axis = 0;
float tSplit = 0.0f;
float dist = 0.0f;
bool bboxintersect = false;
while (tMax < sceneMax)
{
if (top == -1)
{
node = root;
tMin = tMax;
tMax = sceneMax;
pushdown = true;
}
else
{
node = stack[top].node;
tMin = node->tmin;
tMax = node->tmax;
top--;
pushdown = false;
}
while (node->triIdSize != 0)
{
axis = node->axis;
tSplit = (node->splitPos - pathSegment.ray.origin[axis]) / pathSegment.ray.direction[axis];
if (pathSegment.ray.direction[axis] > 0.0f)
{
if (nodes[node->leftID].mins[axis] < nodes[node->rightID].mins[axis])
{
first = &(nodes[node->leftID]);
second = &(nodes[node->rightID]);
}
else
{
first = &nodes[node->rightID];
second = &nodes[node->leftID];
}
}
else
{
if (nodes[node->leftID].maxs[axis] > nodes[node->rightID].maxs[axis])
{
first = &(nodes[node->leftID]);
second = &(nodes[node->rightID]);
}
else
{
first = &(nodes[node->rightID]);
second = &(nodes[node->leftID]);
}
}
if (tSplit >= tMax || tSplit < 0.0f)
node = first;
else if (tSplit <= tMin)
node = second;
else
{
second->tmin = tSplit;
second->tmax = tMax;
top++;
                if (top < STACK_SIZE)
{
stack[top].node = second;
stack[top].tmin = tSplit;
stack[top].tmax = tMax;
}
else
{
break;
}
node = first;
tMax = tSplit;
pushdown = false;
}
if (pushdown)
root = node;
bboxintersect = intersectAABBarrays(pathSegment.ray, node->mins, node->maxs, dist);
if (bboxintersect)
{
int start = node->triIdStart;
int end = start + node->triIdSize;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = -glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
                        if (t > 0.0f && t_min > t)
{
tHit = min(tHit, t);
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
}
}
}
}
}
}
}
__host__ __device__
void traverseKD(KDN::NodeBare* nodes, int numNodes,
float& t,
PathSegment pathSegment,
KDN::TriBare* triangles,
glm::vec3& bary,
int& objMaterialIdx,
int& material_size,
glm::vec3& hit,
glm::vec3& norm,
float& t_min,
int& hit_geom_index,
glm::vec3& intersect_point,
glm::vec3& normal,
glm::vec3& tmp_intersect,
glm::vec3& tmp_normal,
bool& obj_intersect,
ShadeableIntersection* intersections,
int* obj_materialOffsets,
int& path_index)
{
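    // Stack-based traversal: at inner nodes descend toward the near child and push the far
    // child with its entry origin and tmax; at leaves test the node's triangles and pop the
    // stack, finishing when the stack is empty.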
NodeStack stack[STACK_SIZE];
int top = -1;
KDN::NodeBare* node;
KDN::NodeBare* root;
KDN::NodeBare* first;
KDN::NodeBare* second;
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
node = &(nodes[i]);
root = &(nodes[i]);
break;
}
}
glm::vec3 origin = pathSegment.ray.origin;
glm::vec3 invDirection(1.0f / pathSegment.ray.direction[0],
1.0f / pathSegment.ray.direction[1],
1.0f / pathSegment.ray.direction[2]);
float tmax = FLT_MAX;
float tClosestIntersection = t_min;
bool notFullyTraversed = true;
while (notFullyTraversed)
{
if (node->triIdSize != 0)
{
//test all primitives inside the leaf
float dist = 0.0f;
bool bboxintersect = intersectAABBarrays(pathSegment.ray, node->mins, node->maxs, dist);
if (bboxintersect)
{
int start = node->triIdStart;
int end = start + node->triIdSize;
for (int i = start; i < end; i++)
{
KDN::TriBare* T = &(triangles[i]);
glm::vec3 v1(T->x1, T->y1, T->z1);
glm::vec3 v2(T->x2, T->y2, T->z2);
glm::vec3 v3(T->x3, T->y3, T->z3);
bool intersected = glm::intersectRayTriangle(pathSegment.ray.origin,
pathSegment.ray.direction,
v1, v2, v3, bary);
if (intersected)
{
glm::vec3 n1(T->nx1, T->ny1, T->nz1);
glm::vec3 n2(T->nx2, T->ny2, T->nz2);
glm::vec3 n3(T->nx3, T->ny3, T->nz3);
objMaterialIdx = triangles[i].mtlIdx + material_size - 1;
hit = pathSegment.ray.origin + pathSegment.ray.direction* bary.z;
norm = -glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
hit += norm*0.0001f;
t = glm::distance(pathSegment.ray.origin, hit);
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = obj_materialOffsets[T->mtlIdx];
intersect_point = hit;
normal = norm;
tmp_intersect = hit;
tmp_normal = normal;
obj_intersect = true;
intersections[path_index].t = t;
//return;
}
}
}
}
//test if leaf + empty stack => return
if (top == -1)
{
notFullyTraversed = false;
}
else
{
//pop all stack
origin = stack[top].origin;
tmax = stack[top].tmax;
node = stack[top].node;
top--;
}
}
else
{
//get axis of node and its split plane
const int axis = node->axis;
const float plane = node->splitPos;
//test if ray is not parallel to plane
if ((fabs(pathSegment.ray.direction[axis]) > EPSILON))
{
const float t = (plane - origin[axis]) * invDirection[axis];
//case of the ray intersecting the plane, then test both childs
if (0.0f < t && t < tmax)
{
//traverse near first, then far. Set tmax = t for near
//push only far child onto stack
top++;
stack[top].origin[0] = origin[0] + pathSegment.ray.direction[0] * t;
stack[top].origin[1] = origin[1] + pathSegment.ray.direction[1] * t;
stack[top].origin[2] = origin[2] + pathSegment.ray.direction[2] * t;
stack[top].node = (origin[axis] > plane) ? &(nodes[node->leftID]) : &(nodes[node->rightID]);
stack[top].tmax = tmax - t;
tmax = t;
}
}
//in every case: traverse near child first
node = (origin[axis] > plane) ? &(nodes[node->rightID]) : &(nodes[node->leftID]);
}
}
}
// pathTraceOneBounceKDbare handles ray intersections, generates intersections for shading,
// and scatters new rays; the KD traversal is either the bare parent-link walk or the
// short-stack hybrid, selected by the shortstack flag. You might want to call scatterRay
// from interactions.h
__global__ void pathTraceOneBounceKDbare(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::TriBare* triangles
, int numTriangles
, KDN::NodeBare* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
, bool shortstack
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
if (shortstack == false)
{
// standard traversal
traverseKDbare(nodes, numNodes, t,
pathSegment, triangles,
bary, objMaterialIdx,
material_size, hit,
norm, t_min,
hit_geom_index, intersect_point,
normal, tmp_intersect,
tmp_normal, obj_intersect,
intersections, obj_materialOffsets,
path_index);
}
else
{
// optimized short-stack traversal
traverseKDbareShortHybrid(nodes, numNodes, t,
pathSegment, triangles,
bary, objMaterialIdx,
material_size, hit,
norm, t_min,
hit_geom_index, intersect_point,
normal, tmp_intersect,
tmp_normal, obj_intersect,
intersections, obj_materialOffsets,
path_index);
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKDbareBoxes handles ray intersections, generates intersections for shading,
// and scatters new rays. You might want to call scatterRay from interactions.h
__global__ void pathTraceOneBounceKDbareBoxes(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::TriBare* triangles
, int numTriangles
, KDN::NodeBare* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
for (int i = 0; i < numNodes; i++)
{
t = boxIntersectionTestBox(nodes[i].mins, nodes[i].maxs, pathSegment.ray, tmp_intersect, tmp_normal, outside);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = geoms_size;
intersect_point = tmp_intersect;
normal = tmp_normal;
obj_intersect = true;
objMaterialIdx = material_size - 1;
}
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKDbareShortStack handles ray intersections, generates intersections for shading,
// and scatters new rays. You might want to call scatterRay from interactions.h
__global__ void pathTraceOneBounceKDbareShortStack(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::TriBare* triangles
, int numTriangles
, KDN::NodeBare* nodes
, int numNodes
, int* obj_materialOffsets
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t = 0.0;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
// KD traversal standard short stack
traverseKDshort(nodes, numNodes, t,
pathSegment, triangles,
bary, objMaterialIdx,
material_size, hit,
norm, t_min,
hit_geom_index, intersect_point,
normal, tmp_intersect,
tmp_normal, obj_intersect,
intersections, obj_materialOffsets,
path_index);
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = objMaterialIdx;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
// pathTraceOneBounceKD handles ray intersections and generates intersections for shading.
// This is the KD-tree implementation.
__global__ void pathTraceOneBounceKD(
int depth
, int iter
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, Material * materials
, int material_size
, ShadeableIntersection * intersections
, float softness
, KDN::Triangle* triangles
, int numTriangles
, KDN::KDnode* nodes
, int numNodes
, bool hasobj
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
if (pathSegments[path_index].remainingBounces>0)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec3 hit;
glm::vec3 norm;
glm::vec3 bary;
glm::vec3 v1;
glm::vec3 v2;
glm::vec3 v3;
glm::vec3 n1;
glm::vec3 n2;
glm::vec3 n3;
int pidxo1 = 0;
int pidxo2 = 0;
int pidxo3 = 0;
bool intersected = false;
bool obj_intersect = false;
// naive parse through global geoms
int objMaterialIdx = -1;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
objMaterialIdx = -1;
int iterator = 0;
if (hasobj)
{
// KDTREE TRAVERSAL
float dist = -1.0f;
glm::vec3 norm;
dist = -1.0f;
norm = glm::vec3(0.0f);
bool hitGeom = false;
Ray r = pathSegment.ray;
// Use a per-thread array of node flags, initialized to false and set to true
// once a node is visited, instead of a shared "visited" member on the nodes,
// to avoid conflicts when multiple threads traverse the tree concurrently.
bool nodeIDs[1000] = { false };
if (numNodes != 0)
{
float mindist = FLT_MAX;
int currID = nodeIDs[nodes[0].ID];
// get the root node
for (int i = 0; i < numNodes; i++)
{
if (nodes[i].parentID == -1)
{
currID = nodes[i].ID;
break;
}
}
float boxdist = -1.0f;
while (true)
{
if (currID == -1)
break;
// check if it intersects the bounds
//printf("1\n");
hitGeom = intersectAABB(r, nodes[currID].bbox, dist);
//printf("2\n");
if (hitGeom == false)
{
nodeIDs[nodes[currID].ID] = true;
currID = nodes[currID].parentID;
}
else
{
if (nodes[currID].leftID != -1 && nodeIDs[nodes[currID].leftID] != true)
currID = nodes[currID].leftID;
else if (nodes[currID].rightID != -1 && nodeIDs[nodes[currID].rightID] != true)
currID = nodes[currID].rightID;
else if (nodeIDs[nodes[currID].ID] == false)
{
//std::cout << "NODE LOOP: " << nodes[currID].ID << " PARENT: " << nodes[currID].parentID << std::endl;
nodeIDs[nodes[currID].ID] = true;
int size = nodes[currID].triIdSize;
if (size > 0)
{
int start = nodes[currID].triIdStart;
int end = start + size;
for (int i = start; i < end; i++)
{
//KDN::Triangle t = triangles[i];
glm::vec3 v1(triangles[i].x1, triangles[i].y1, triangles[i].z1);
glm::vec3 v2(triangles[i].x2, triangles[i].y2, triangles[i].z2);
glm::vec3 v3(triangles[i].x3, triangles[i].y3, triangles[i].z3);
glm::vec3 barytemp(0.0f, 0.0f, 0.0f);
bool intersected = glm::intersectRayTriangle(r.origin,
r.direction,
v1, v2, v3, barytemp);
if (intersected && barytemp.z < mindist)
{
glm::vec3 bary(barytemp.x, barytemp.y, 1.0 - barytemp.x - barytemp.y);
glm::vec3 n1(triangles[i].nx1, triangles[i].ny1, triangles[i].nz1);
glm::vec3 n2(triangles[i].nx2, triangles[i].ny2, triangles[i].nz2);
glm::vec3 n3(triangles[i].nx3, triangles[i].ny3, triangles[i].nz3);
norm = (bary[0] * n1 + bary[1] * n2 + bary[2] * n3);
dist = barytemp.z;
mindist = dist;
//glm::vec3 pos = r.origin + r.direction * dist;
glm::vec3 intersect = r.origin + r.direction*dist;
//printf("KDLOOPPTR INTERSECT POINT: P: [%f %f %f] NODEID: %d\n", intersect.x,
// intersect.y,
// intersect.z,
// currID);
norm = glm::normalize((1 - bary.x - bary.y) * n1 + bary.x * n2 + (bary.y) * n3);
//norm(glm::normalize(n1));
//intersect += norm*0.0001f;
t = dist;
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = 0;// obj_materialOffsets[i];
intersect_point = intersect;
tmp_intersect = intersect;
tmp_normal = norm;//glm::vec3(0.0f, 1.0f, 0.0f);
intersections[path_index].t = t;
}
}
}
}
}
else
currID = nodes[currID].parentID;
}
}
}
if (hit_geom_index != -1)
{
hit_geom_index = 0;
obj_intersect = true;
t_min = dist;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
// updating rays
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
if (obj_intersect)
{
objMaterialIdx = 0;
pathSegments[path_index].materialIdHit = objMaterialIdx;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[objMaterialIdx],
rng,
softness);
}
else
{
pathSegments[path_index].materialIdHit = geoms[hit_geom_index].materialid;
scatterRay(pathSegments[path_index].ray,
pathSegments[path_index].color,
intersect_point,
normal,
materials[geoms[hit_geom_index].materialid],
rng,
softness);
}
if (obj_intersect)
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[0].materialid;
intersections[path_index].surfaceNormal = normal;
}
else
{
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
}
}
__global__ void shadeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, bool enablesss
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
//idx = pathSegments[idx].initialidx;
//idx = pathSegments[idx].pixelIndex;
if (pathSegments[idx].remainingBounces>0)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f)
{ // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
else {
if (enablesss && (material.transmittance.x > 0.0f || material.transmittance.y > 0.0f || material.transmittance.z > 0.0f))
{
float scenescale = 1.0f;
float sss = (scenescale * pathSegments[idx].ray.sdepth > 1.0) ? 1.0 : pathSegments[idx].ray.sdepth;
sss = ((1.0f - sss) < 0.0) ? 0.0 : sss;
sss = glm::pow(sss, 2);
pathSegments[idx].color *= (materialColor)* 1.0f + material.hasRefractive * material.specular.color + sss * material.transmittance;
}
else if (material.hasRefractive > 0.0f)
{
pathSegments[idx].color *= (materialColor)* 1.0f + material.hasRefractive * material.specular.color;
}
else if (material.hasReflective > 0.0f)
{
pathSegments[idx].color *= (materialColor)* 1.0f + material.hasReflective * material.specular.color;
}
else
{
pathSegments[idx].color *= (materialColor) * 1.0f;
}
pathSegments[idx].remainingBounces--;
}
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
index = iterationPaths[index].pixelIndex;
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
// Add the current iteration's output to the current image
__global__ void partialGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
//index = iterationPaths[index].pixelIndex;
if (iterationPaths[index].remainingBounces == 0)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo,
int frame,
int iter,
float focalLength,
float dofAngle,
bool cacherays,
bool antialias,
float softness,
bool enableSss,
bool testingmode,
bool compaction,
bool enablekd,
bool vizkd,
bool usebbox,
bool shortstack) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 32;
///////////////////////////////////////////////////////////////////////////
// perform one iteration of path tracing
cudaEvent_t startGenRayFromCam, stopGenRayFromCam;
cudaEvent_t startPathTraceOneBounce, stopPathTraceOneBounce;
cudaEvent_t startShadeMaterial, stopShadeMaterial;
float millisecondsGenRayFromCam = 0.0f;
float millisecondsPathTraceOneBounce = 0.0f;
float millisecondsShadeMaterial = 0.0f;
float ms1 = 0.0;
float ms2 = 0.0;
float ms3 = 0.0;
// cache rays
if (cacherays)
{
if (iter == 1)
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths_cache, focalLength, dofAngle, antialias);
checkCUDAError("generate camera ray");
}
cudaMemcpy(dev_paths, dev_paths_cache, pixelcount*sizeof(PathSegment), cudaMemcpyDeviceToDevice);
}
else
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths, focalLength, dofAngle, antialias);
checkCUDAError("generate camera ray");
}
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int num_paths_temp = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
if (testingmode)
{
cudaEventCreate(&startPathTraceOneBounce); cudaEventCreate(&stopPathTraceOneBounce); cudaEventRecord(startPathTraceOneBounce);
}
if (enablekd == false)
{
pathTraceOneBounce << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, iter
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_materials
, hst_scene->materials.size()
, dev_intersections
, softness
, hst_scene->obj_numshapes
, obj_numpolyverts
, obj_verts
, obj_norms
, obj_texts
, obj_polyoffsets
, obj_polysidxflat
, obj_polysbboxes
, hst_scene->polyidxcount
, obj_materialOffsets
, hst_scene->hasObj
, usebbox);
checkCUDAError("trace one bounce");
///*
//printf("numNodes = %d\n", hst_scene->numNodes);
//printf("numTriangles = %d\n", hst_scene->numTriangles);
}
else
{
if (vizkd)
{
pathTraceOneBounceKDbareBoxes << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, iter
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_materials
, hst_scene->materials.size()
, dev_intersections
, softness
, kd_trianglesBare
, hst_scene->numTriangles
, kd_nodesBare
, hst_scene->numNodes
, obj_materialOffsets
, hst_scene->hasObj);
checkCUDAError("trace one bounce kd");
//cudaEventQuery(0);
}
else
{
pathTraceOneBounceKDbare << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, iter
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_materials
, hst_scene->materials.size()
, dev_intersections
, softness
, kd_trianglesBare
, hst_scene->numTriangles
, kd_nodesBare
, hst_scene->numNodes
, obj_materialOffsets
, hst_scene->hasObj
, shortstack);
checkCUDAError("trace one bounce kd");
//cudaEventQuery(0);
}
}
cudaDeviceSynchronize();
depth++;
if (testingmode)
{
cudaEventRecord(stopPathTraceOneBounce); cudaEventSynchronize(stopPathTraceOneBounce);
ms2 = 0;
cudaEventElapsedTime(&ms2, startPathTraceOneBounce, stopPathTraceOneBounce);
millisecondsPathTraceOneBounce += ms2;
cudaEventDestroy(startPathTraceOneBounce);
cudaEventDestroy(stopPathTraceOneBounce);
}
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
enableSss
);
if (compaction)
{
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
partialGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
}
if (compaction)
{
thrust::device_ptr<PathSegment> thrust_paths(dev_paths);
thrust::device_ptr<PathSegment> P = thrust::remove_if(thrust_paths, thrust_paths + num_paths, is_zero_bounce());
num_paths_temp = P - thrust_paths;
num_paths = num_paths_temp;
}
// after first hit
if (iter == 2)
{
thrust::device_ptr<PathSegment> thrust_paths2(dev_paths);
thrust::sort(thrust_paths2, thrust_paths2 + num_paths);
thrust::device_ptr<ShadeableIntersection> thrust_intersections(dev_intersections);
thrust::sort(thrust_intersections, thrust_intersections + num_paths);
}
// stop when no paths remain or depth exceeds 7 (when testing without compaction)
if (num_paths <= 0 || depth > 7)
iterationComplete = true;
}
if (testingmode)
{
printf(" pathTrace time = %f\n", millisecondsPathTraceOneBounce);
//printf("%f,\n", millisecondsPathTraceOneBounce);
}
if (!compaction)
{
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
}
///////////////////////////////////////////////////////////////////////////
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
8ea3c76fd1807be75925cb29257bab8840c4270e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC Summer Institute 2018
// Andreas Goetz ([email protected])
// CUDA program to add two vectors in parallel on the GPU
// version 2:
// launch a fixed number of blocks and threads
//
// /* FIXME */ comments need modifications
//
#include<stdio.h>
// define vector length, number of blocks NBL and threads per block TPB
#define N (255*2047)
#define NBL 256
#define TPB 128
//
// CUDA device function that adds two integer vectors
//
__global__ void add(int *a, int *b, int *c, int n){
/* FIXME
INSERT HERE CODE TO CALCULATE REQUIRED INDEX AND STRIDE
*/
while (tid < n) {
c[tid] = a[tid] + b[tid];
tid += stride;
}
}
//
// main program
//
int main(void){
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i, err;
// allocate host memory
h_a = (int *) malloc(size);
h_b = (int *) malloc(size);
h_c = (int *) malloc(size);
// allocate device memory
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// initialize vectors
for (i=0; i<N; i++){
h_a[i] = i+1;
h_b[i] = i+1;
}
// copy input data to device
hipMemcpy(/* FIXME */);
hipMemcpy(/* FIXME */);
// add vectors by launching a sufficient number of blocks of the add() kernel
printf("\nLaunching vector addition kernel...\n");
printf("Vector length = %d\n",N);
printf("Blocks = %d\n",NBL);
printf("Threads per block = %d\n",TPB);
printf("Kernel copies = %d\n",NBL*TPB);
add<<</* FIXME */>>>(d_a, d_b, d_c, N);
// copy results back to host
hipMemcpy(/* FIXME */);
// deallocate memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// check results
err = 0;
for (i=0; i<N; i++){
if (h_c[i] != 2*(i+1)) err = 1;
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match.\n\n");
}
// deallocate host memory
free(h_a);
free(h_b);
free(h_c);
return err;
}
|
8ea3c76fd1807be75925cb29257bab8840c4270e.cu
|
// SDSC Summer Institute 2018
// Andreas Goetz ([email protected])
// CUDA program to add two vectors in parallel on the GPU
// version 2:
// launch a fixed number of blocks and threads
//
// /* FIXME */ comments need modifications
//
#include<stdio.h>
// define vector length, number of blocks NBL and threads per block TPB
#define N (255*2047)
#define NBL 256
#define TPB 128
//
// CUDA device function that adds two integer vectors
//
__global__ void add(int *a, int *b, int *c, int n){
/* FIXME
INSERT HERE CODE TO CALCULATE REQUIRED INDEX AND STRIDE
*/
while (tid < n) {
c[tid] = a[tid] + b[tid];
tid += stride;
}
}
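// A possible completion sketch for the FIXME above (not part of the original
// exercise): a grid-stride loop in which each of the NBL*TPB kernel copies
// starts at its global thread index and advances by the total number of
// threads in the grid. The kernel name below is illustrative only.
__global__ void add_gridstride_sketch(int *a, int *b, int *c, int n){
  int tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
  int stride = gridDim.x * blockDim.x;             // total threads in the grid
  while (tid < n) {
    c[tid] = a[tid] + b[tid];
    tid += stride;
  }
}
// With that indexing, the remaining host-side FIXMEs would look roughly like:
//   cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
//   cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
//   add<<<NBL, TPB>>>(d_a, d_b, d_c, N);
//   cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);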
//
// main program
//
int main(void){
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i, err;
// allocate host memory
h_a = (int *) malloc(size);
h_b = (int *) malloc(size);
h_c = (int *) malloc(size);
// allocate device memory
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// initialize vectors
for (i=0; i<N; i++){
h_a[i] = i+1;
h_b[i] = i+1;
}
// copy input data to device
cudaMemcpy(/* FIXME */);
cudaMemcpy(/* FIXME */);
// add vectors by launching a sufficient number of blocks of the add() kernel
printf("\nLaunching vector addition kernel...\n");
printf("Vector length = %d\n",N);
printf("Blocks = %d\n",NBL);
printf("Threads per block = %d\n",TPB);
printf("Kernel copies = %d\n",NBL*TPB);
add<<</* FIXME */>>>(d_a, d_b, d_c, N);
// copy results back to host
cudaMemcpy(/* FIXME */);
// deallocate memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// check results
err = 0;
for (i=0; i<N; i++){
if (h_c[i] != 2*(i+1)) err = 1;
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match.\n\n");
}
// deallocate host memory
free(h_a);
free(h_b);
free(h_c);
return err;
}
|
bf9d261d32f08995700bfde68cc2fbce3bda18e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdio>
// TODO Improve this kernel.
// Create a new file for each new category of optimizations you
// successively apply {force_kernel_2.cu, force_kernel_3.cu, ...}
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
for(int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < N;
idx += gridDim.x * blockDim.x){
double f_temp_x = 0.0;
double f_temp_y = 0.0;
double f_temp_z = 0.0;
for (int i = 0; i < N; ++i) {
if(i != idx){
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double r = sqrt(dx * dx + dy * dy + dz * dz);
double inv_r = 1.0 / r;
f_temp_x += dx * inv_r * inv_r * inv_r;
f_temp_y += dy * inv_r * inv_r * inv_r;
f_temp_z += dz * inv_r * inv_r * inv_r;
}
}
f[idx].x = f_temp_x;
f[idx].y = f_temp_y;
f[idx].z = f_temp_z;
}
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
hipLaunchKernelGGL(( computeForcesKernel), dim3(numBlocks), dim3(numThreads), 0, 0, N, p, f);
}
|
bf9d261d32f08995700bfde68cc2fbce3bda18e5.cu
|
#include <cuda_runtime.h>
#include <cstdio>
// TODO Improve this kernel.
// Create a new file for each new category of optimizations you
// successively apply {force_kernel_2.cu, force_kernel_3.cu, ...}
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
for(int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < N;
idx += gridDim.x * blockDim.x){
double f_temp_x = 0.0;
double f_temp_y = 0.0;
double f_temp_z = 0.0;
for (int i = 0; i < N; ++i) {
if(i != idx){
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double r = sqrt(dx * dx + dy * dy + dz * dz);
double inv_r = 1.0 / r;
f_temp_x += dx * inv_r * inv_r * inv_r;
f_temp_y += dy * inv_r * inv_r * inv_r;
f_temp_z += dz * inv_r * inv_r * inv_r;
}
}
f[idx].x = f_temp_x;
f[idx].y = f_temp_y;
f[idx].z = f_temp_z;
}
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
}
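// A possible first optimization sketch for the TODO above (the kernel name is
// illustrative, not a definitive implementation): cache the particle's own
// position in registers and replace the sqrt-plus-division with a single
// rsqrt, reusing 1/r^3 for all three force components.
__global__ void computeForcesKernelRsqrtSketch(int N, const double3 *p, double3 *f) {
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < N;
         idx += gridDim.x * blockDim.x) {
        const double3 pi = p[idx];          // own position, read once
        double fx = 0.0, fy = 0.0, fz = 0.0;
        for (int i = 0; i < N; ++i) {
            if (i != idx) {
                double dx = p[i].x - pi.x;
                double dy = p[i].y - pi.y;
                double dz = p[i].z - pi.z;
                double inv_r = rsqrt(dx * dx + dy * dy + dz * dz); // 1/r
                double inv_r3 = inv_r * inv_r * inv_r;             // 1/r^3
                fx += dx * inv_r3;
                fy += dy * inv_r3;
                fz += dz * inv_r3;
            }
        }
        f[idx].x = fx;
        f[idx].y = fy;
        f[idx].z = fz;
    }
}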
|
51b7476998d493219ff5137fe88d2441a759a0bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void updateLagrangeMultiplierKernel4ADMM(float* u, float* v, float* lam, float* temp, float mu, uint32_t w, uint32_t h, uint32_t nc) {
uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
uint32_t c = threadIdx.z + blockDim.z * blockIdx.z;
if(x < w && y < h && c < nc) {
uint32_t index = x + w * y + w * h * c;
temp[index] = u[index] - v[index];
lam[index] = lam[index] + temp[index] * mu;
}
}
|
51b7476998d493219ff5137fe88d2441a759a0bd.cu
|
#include "includes.h"
__global__ void updateLagrangeMultiplierKernel4ADMM(float* u, float* v, float* lam, float* temp, float mu, uint32_t w, uint32_t h, uint32_t nc) {
uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
uint32_t c = threadIdx.z + blockDim.z * blockIdx.z;
if(x < w && y < h && c < nc) {
uint32_t index = x + w * y + w * h * c;
temp[index] = u[index] - v[index];
lam[index] = lam[index] + temp[index] * mu;
}
}
|
f89efd99c8ad8b6c4b19cd5d3ab3f5c14f70ca5b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgesellcmv.cu normal z -> s, Wed Sep 17 15:08:43 2014
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_s
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
sgesellcmv_kernel( int num_rows,
int num_cols,
int blocksize,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float *d_x,
float beta,
float *d_y)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
int offset = d_rowptr[ blockIdx.x ];
int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = d_colind [offset+ blocksize * n + threadIdx.x ];
float val = d_val[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*d_x[col];
}
}
d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row (=1)
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in SELLC/P
@param
d_colind magma_int_t*
columnindices of A in SELLC/P
@param
d_rowptr magma_int_t*
rowpointer of SELLP
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgesellcmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float *d_x,
float beta,
float *d_y ){
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
hipLaunchKernelGGL(( sgesellcmv_kernel), dim3(grid), dim3(blocksize), 0, magma_stream ,
m, n, blocksize, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
|
f89efd99c8ad8b6c4b19cd5d3ab3f5c14f70ca5b.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgesellcmv.cu normal z -> s, Wed Sep 17 15:08:43 2014
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_s
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
sgesellcmv_kernel( int num_rows,
int num_cols,
int blocksize,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float *d_x,
float beta,
float *d_y)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
int offset = d_rowptr[ blockIdx.x ];
int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = d_colind [offset+ blocksize * n + threadIdx.x ];
float val = d_val[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*d_x[col];
}
}
d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row (=1)
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in SELLC/P
@param
d_colind magma_int_t*
columnindices of A in SELLC/P
@param
d_rowptr magma_int_t*
rowpointer of SELLP
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgesellcmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float *d_x,
float beta,
float *d_y ){
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
sgesellcmv_kernel<<< grid, blocksize, 0, magma_stream >>>
( m, n, blocksize, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
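// A hedged usage sketch (assumed variable names, not from the original source):
// given a matrix already stored in SELLC/SELLP device arrays d_val, d_colind,
// d_rowptr and device vectors d_x, d_y, the routine above would be called
// roughly as
//   magma_sgesellcmv( MagmaNoTrans, m, n, blocksize, slices, 1 /*alignment*/,
//                     alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y );
// following the parameter list documented in the header comment.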
|
c79a0648816e516228d57e6b96eccb267212b962.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_fill(float4* d_dx1, float val, int numel) {
size_t col = threadIdx.x + blockIdx.x * blockDim.x;
if (col >= numel) { return; }
d_dx1[col].x = val;
d_dx1[col].y = val;
d_dx1[col].z = val;
d_dx1[col].w = val;
}
|
c79a0648816e516228d57e6b96eccb267212b962.cu
|
#include "includes.h"
__global__ void kernel_fill(float4* d_dx1, float val, int numel) {
size_t col = threadIdx.x + blockIdx.x * blockDim.x;
if (col >= numel) { return; }
d_dx1[col].x = val;
d_dx1[col].y = val;
d_dx1[col].z = val;
d_dx1[col].w = val;
}
|
2316901dbe0db28081b25bc3ad137c6ded926796.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../gpu_utils/gpu_utils.h"
#include <stddef.h>
#include <stdint.h>
#include "bondarenko_2004.h"
__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes, size_t pitch, bool use_adpt_dt, real min_dt) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
*((real * )((char *) sv + pitch * 0) + threadID) = -82.4202f; // V millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.115001; // Cai micromolar
*((real * )((char *) sv + pitch * 2) + threadID) = 0.115001; // Cass micromolar
*((real * )((char *) sv + pitch * 3) + threadID) = 1299.5; // CaJSR micromolar
*((real * )((char *) sv + pitch * 4) + threadID) = 1299.5; // CaNSR micromolar
*((real * )((char *) sv + pitch * 5) + threadID) = 0.0; // P_RyR dimensionless
*((real * )((char *) sv + pitch * 6) + threadID) = 11.2684; // LTRPN_Ca micromolar
*((real * )((char *) sv + pitch * 7) + threadID) = 125.29; // HTRPN_Ca micromolar
*((real * )((char *) sv + pitch * 8) + threadID) = 0.149102e-4; // P_O1 dimensionless
*((real * )((char *) sv + pitch * 9) + threadID) = 0.951726e-10; // P_O2 dimensionless
*((real * )((char *) sv + pitch * 10) + threadID) = 0.16774e-3; // P_C2 dimensionless
*((real * )((char *) sv + pitch * 11) + threadID) = 0.930308e-18; // O dimensionless
*((real * )((char *) sv + pitch * 12) + threadID) = 0.124216e-3; // C2 dimensionless
*((real * )((char *) sv + pitch * 13) + threadID) = 0.578679e-8; // C3 dimensionless
*((real * )((char *) sv + pitch * 14) + threadID) = 0.119816e-12; // C4 dimensionless
*((real * )((char *) sv + pitch * 15) + threadID) = 0.497923e-18; // I1 dimensionless
*((real * )((char *) sv + pitch * 16) + threadID) = 0.345847e-13; // I2 dimensionless
*((real * )((char *) sv + pitch * 17) + threadID) = 0.185106e-13; // I3 dimensionless
*((real * )((char *) sv + pitch * 18) + threadID) = 14237.1; // Nai micromolar
*((real * )((char *) sv + pitch * 19) + threadID) = 0.020752; // C_Na2 dimensionless
*((real * )((char *) sv + pitch * 20) + threadID) = 0.279132e-3; // C_Na1 dimensionless
*((real * )((char *) sv + pitch * 21) + threadID) = 0.713483e-6; // O_Na dimensionless
*((real * )((char *) sv + pitch * 22) + threadID) = 0.153176e-3; // IF_Na dimensionless
*((real * )((char *) sv + pitch * 23) + threadID) = 0.673345e-6; // I1_Na dimensionless
*((real * )((char *) sv + pitch * 24) + threadID) = 0.155787e-8; // I2_Na dimensionless
*((real * )((char *) sv + pitch * 25) + threadID) = 0.0113879; // IC_Na2 dimensionless
*((real * )((char *) sv + pitch * 26) + threadID) = 0.34278; // IC_Na3 dimensionless
*((real * )((char *) sv + pitch * 27) + threadID) = 143720.0; // Ki micromolar
*((real * )((char *) sv + pitch * 28) + threadID) = 0.265563e-2; // ato_f dimensionless
*((real * )((char *) sv + pitch * 29) + threadID) = 0.999977; // ito_f dimensionless
*((real * )((char *) sv + pitch * 30) + threadID) = 0.417069e-3; // ato_s dimensionless
*((real * )((char *) sv + pitch * 31) + threadID) = 0.998543; // ito_s dimensionless
*((real * )((char *) sv + pitch * 32) + threadID) = 0.262753e-3; // nKs dimensionless
*((real * )((char *) sv + pitch * 33) + threadID) = 0.417069e-3; // aur dimensionless
*((real * )((char *) sv + pitch * 34) + threadID) = 0.998543; // iur dimensionless
*((real * )((char *) sv + pitch * 35) + threadID) = 0.417069e-3; // aKss dimensionless
*((real * )((char *) sv + pitch * 36) + threadID) = 1.0; // iKss dimensionless
*((real * )((char *) sv + pitch * 37) + threadID) = 0.641229e-3; // C_K2 dimensionless
*((real * )((char *) sv + pitch * 38) + threadID) = 0.992513e-3; // C_K1 dimensionless
*((real * )((char *) sv + pitch * 39) + threadID) = 0.175298e-3; // O_K dimensionless
*((real * )((char *) sv + pitch * 40) + threadID) = 0.319129e-4; // I_K dimensionless
if(use_adpt_dt) {
*((real *)((char *)sv + pitch * NEQ) + threadID) = min_dt; // dt
*((real *)((char *)sv + pitch * (NEQ + 1)) + threadID) = 0.0; // time_new
*((real *)((char *)sv + pitch * (NEQ + 2)) + threadID) = 0.0; // previous dt
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY, real stim_current, int thread_id, real dt, size_t pitch, bool use_adpt_dt) {
// State variables
real V_old_;
real Cai_old_;
real Cass_old_;
real CaJSR_old_;
real CaNSR_old_;
real P_RyR_old_;
real LTRPN_Ca_old_;
real HTRPN_Ca_old_;
real P_O1_old_;
real P_O2_old_;
real P_C2_old_;
real O_old_;
real C2_old_;
real C3_old_;
real C4_old_;
real I1_old_;
real I2_old_;
real I3_old_;
real Nai_old_;
real C_Na2_old_;
real C_Na1_old_;
real O_Na_old_;
real IF_Na_old_;
real I1_Na_old_;
real I2_Na_old_;
real IC_Na2_old_;
real IC_Na3_old_;
real Ki_old_;
real ato_f_old_;
real ito_f_old_;
real ato_s_old_;
real ito_s_old_;
real nKs_old_;
real aur_old_;
real iur_old_;
real aKss_old_;
real iKss_old_;
real C_K2_old_;
real C_K1_old_;
real O_K_old_;
real I_K_old_;
if(use_adpt_dt) {
V_old_ = sv[0]; // initial value = -82.4202 millivolt
Cai_old_ = sv[1]; // initial value = 0.115001 micromolar
Cass_old_ = sv[2]; // initial value = 0.115001 micromolar
CaJSR_old_ = sv[3]; // initial value = 1299.5 micromolar
CaNSR_old_ = sv[4]; // initial value = 1299.5 micromolar
P_RyR_old_ = sv[5]; // initial value = 0 dimensionless
LTRPN_Ca_old_ = sv[6]; // initial value = 11.2684 micromolar
HTRPN_Ca_old_ = sv[7]; // initial value = 125.29 micromolar
P_O1_old_ = sv[8]; // initial value = 0.149102e-4 dimensionless
P_O2_old_ = sv[9]; // initial value = 0.951726e-10 dimensionless
P_C2_old_ = sv[10]; // initial value = 0.16774e-3 dimensionless
O_old_ = sv[11]; // initial value = 0.930308e-18 dimensionless
C2_old_ = sv[12]; // initial value = 0.124216e-3 dimensionless
C3_old_ = sv[13]; // initial value = 0.578679e-8 dimensionless
C4_old_ = sv[14]; // initial value = 0.119816e-12 dimensionless
I1_old_ = sv[15]; // initial value = 0.497923e-18 dimensionless
I2_old_ = sv[16]; // initial value = 0.345847e-13 dimensionless
I3_old_ = sv[17]; // initial value = 0.185106e-13 dimensionless
Nai_old_ = sv[18]; // initial value = 14237.1 micromolar
C_Na2_old_ = sv[19]; // initial value = 0.020752 dimensionless
C_Na1_old_ = sv[20]; // initial value = 0.279132e-3 dimensionless
O_Na_old_ = sv[21]; // initial value = 0.713483e-6 dimensionless
IF_Na_old_ = sv[22]; // initial value = 0.153176e-3 dimensionless
I1_Na_old_ = sv[23]; // initial value = 0.673345e-6 dimensionless
I2_Na_old_ = sv[24]; // initial value = 0.155787e-8 dimensionless
IC_Na2_old_ = sv[25]; // initial value = 0.0113879 dimensionless
IC_Na3_old_ = sv[26]; // initial value = 0.34278 dimensionless
Ki_old_ = sv[27]; // initial value = 143720 micromolar
ato_f_old_ = sv[28]; // initial value = 0.265563e-2 dimensionless
ito_f_old_ = sv[29]; // initial value = 0.999977 dimensionless
ato_s_old_ = sv[30]; // initial value = 0.417069e-3 dimensionless
ito_s_old_ = sv[31]; // initial value = 0.998543 dimensionless
nKs_old_ = sv[32]; // initial value = 0.262753e-3 dimensionless
aur_old_ = sv[33]; // initial value = 0.417069e-3 dimensionless
iur_old_ = sv[34]; // initial value = 0.998543 dimensionless
aKss_old_ = sv[35]; // initial value = 0.417069e-3 dimensionless
iKss_old_ = sv[36]; // initial value = 1 dimensionless
C_K2_old_ = sv[37]; // initial value = 0.641229e-3 dimensionless
C_K1_old_ = sv[38]; // initial value = 0.992513e-3 dimensionless
O_K_old_ = sv[39]; // initial value = 0.175298e-3 dimensionless
I_K_old_ = sv[40]; // initial value = 0.319129e-4 dimensionless
}
else {
V_old_ = *((real *)((char *)sv + pitch * 0) + thread_id); // initial value = -82.4202 millivolt
Cai_old_ = *((real *)((char *)sv + pitch * 1) + thread_id); // initial value = 0.115001 micromolar
Cass_old_ = *((real *)((char *)sv + pitch * 2) + thread_id); // initial value = 0.115001 micromolar
CaJSR_old_ = *((real *)((char *)sv + pitch * 3) + thread_id); // initial value = 1299.5 micromolar
CaNSR_old_ = *((real *)((char *)sv + pitch * 4) + thread_id); // initial value = 1299.5 micromolar
P_RyR_old_ = *((real *)((char *)sv + pitch * 5) + thread_id); // initial value = 0 dimensionless
LTRPN_Ca_old_ = *((real *)((char *)sv + pitch * 6) + thread_id); // initial value = 11.2684 micromolar
HTRPN_Ca_old_ = *((real *)((char *)sv + pitch * 7) + thread_id); // initial value = 125.29 micromolar
P_O1_old_ = *((real *)((char *)sv + pitch * 8) + thread_id); // initial value = 0.149102e-4 dimensionless
P_O2_old_ = *((real *)((char *)sv + pitch * 9) + thread_id); // initial value = 0.951726e-10 dimensionless
P_C2_old_ = *((real *)((char *)sv + pitch * 10) + thread_id); // initial value = 0.16774e-3 dimensionless
O_old_ = *((real *)((char *)sv + pitch * 11) + thread_id); // initial value = 0.930308e-18 dimensionless
C2_old_ = *((real *)((char *)sv + pitch * 12) + thread_id); // initial value = 0.124216e-3 dimensionless
C3_old_ = *((real *)((char *)sv + pitch * 13) + thread_id); // initial value = 0.578679e-8 dimensionless
C4_old_ = *((real *)((char *)sv + pitch * 14) + thread_id); // initial value = 0.119816e-12 dimensionless
I1_old_ = *((real *)((char *)sv + pitch * 15) + thread_id); // initial value = 0.497923e-18 dimensionless
I2_old_ = *((real *)((char *)sv + pitch * 16) + thread_id); // initial value = 0.345847e-13 dimensionless
I3_old_ = *((real *)((char *)sv + pitch * 17) + thread_id); // initial value = 0.185106e-13 dimensionless
Nai_old_ = *((real *)((char *)sv + pitch * 18) + thread_id); // initial value = 14237.1 micromolar
C_Na2_old_ = *((real *)((char *)sv + pitch * 19) + thread_id); // initial value = 0.020752 dimensionless
C_Na1_old_ = *((real *)((char *)sv + pitch * 20) + thread_id); // initial value = 0.279132e-3 dimensionless
O_Na_old_ = *((real *)((char *)sv + pitch * 21) + thread_id); // initial value = 0.713483e-6 dimensionless
IF_Na_old_ = *((real *)((char *)sv + pitch * 22) + thread_id); // initial value = 0.153176e-3 dimensionless
I1_Na_old_ = *((real *)((char *)sv + pitch * 23) + thread_id); // initial value = 0.673345e-6 dimensionless
I2_Na_old_ = *((real *)((char *)sv + pitch * 24) + thread_id); // initial value = 0.155787e-8 dimensionless
IC_Na2_old_ = *((real *)((char *)sv + pitch * 25) + thread_id); // initial value = 0.0113879 dimensionless
IC_Na3_old_ = *((real *)((char *)sv + pitch * 26) + thread_id); // initial value = 0.34278 dimensionless
Ki_old_ = *((real *)((char *)sv + pitch * 27) + thread_id); // initial value = 143720 micromolar
ato_f_old_ = *((real *)((char *)sv + pitch * 28) + thread_id); // initial value = 0.265563e-2 dimensionless
ito_f_old_ = *((real *)((char *)sv + pitch * 29) + thread_id); // initial value = 0.999977 dimensionless
ato_s_old_ = *((real *)((char *)sv + pitch * 30) + thread_id); // initial value = 0.417069e-3 dimensionless
ito_s_old_ = *((real *)((char *)sv + pitch * 31) + thread_id); // initial value = 0.998543 dimensionless
nKs_old_ = *((real *)((char *)sv + pitch * 32) + thread_id); // initial value = 0.262753e-3 dimensionless
aur_old_ = *((real *)((char *)sv + pitch * 33) + thread_id); // initial value = 0.417069e-3 dimensionless
iur_old_ = *((real *)((char *)sv + pitch * 34) + thread_id); // initial value = 0.998543 dimensionless
aKss_old_ = *((real *)((char *)sv + pitch * 35) + thread_id); // initial value = 0.417069e-3 dimensionless
iKss_old_ = *((real *)((char *)sv + pitch * 36) + thread_id); // initial value = 1 dimensionless
C_K2_old_ = *((real *)((char *)sv + pitch * 37) + thread_id); // initial value = 0.641229e-3 dimensionless
C_K1_old_ = *((real *)((char *)sv + pitch * 38) + thread_id); // initial value = 0.992513e-3 dimensionless
O_K_old_ = *((real *)((char *)sv + pitch * 39) + thread_id); // initial value = 0.175298e-3 dimensionless
I_K_old_ = *((real *)((char *)sv + pitch * 40) + thread_id); // initial value = 0.319129e-4 dimensionless
}
// Parameters
const real Acap = 1.534e-4f; // cm2
const real Cm = 1.0f; // microF_per_cm2
const real Vmyo = 25.84e-6f; // microlitre
const real F = 96.5f; // coulomb_per_millimole
const real VJSR = 0.12e-6f; // microlitre
const real Vss = 1.485e-9f; // microlitre
const real VNSR = 2.098e-6f; // microlitre
const real CMDN_tot = 50.0f; // micromolar
const real Km_CMDN = 0.238f; // micromolar
const real CSQN_tot = 15000.0f; // micromolar
const real Km_CSQN = 800.0f; // micromolar
const real v1 = 4.5f; // per_millisecond
const real tau_tr = 20.0f; // millisecond
const real tau_xfer = 8.0f; // millisecond
const real v2 = 1.74e-5f; // per_millisecond
const real v3 = 0.45f; // micromolar_per_millisecond
const real Km_up = 0.5f; // micromolar
const real k_plus_htrpn = 0.00237f; // per_micromolar_millisecond
const real HTRPN_tot = 140.0f; // micromolar
const real k_plus_ltrpn = 0.0327f; // per_micromolar_millisecond
const real LTRPN_tot = 70.0f; // micromolar
const real k_minus_htrpn = 3.2e-5f; // per_millisecond
const real k_minus_ltrpn = 0.0196f; // per_millisecond
const real i_CaL_max = 7.0f; // picoA_per_picoF
const real k_plus_a = 0.006075f; // micromolar4_per_millisecond
const real n = 4.0f; // dimensionless
const real k_minus_b = 0.965f; // per_millisecond
const real k_minus_c = 0.0008f; // per_millisecond
const real k_minus_a = 0.07125f; // per_millisecond
const real k_plus_b = 0.00405f; // micromolar3_per_millisecond
const real m = 3.0f; // dimensionless
const real k_plus_c = 0.009f; // per_millisecond
const real g_CaL = 0.1729f; // milliS_per_microF
const real E_CaL = 63.0f; // millivolt
const real Kpcb = 0.0005f; // per_millisecond
const real Kpc_max = 0.23324f; // per_millisecond
const real Kpc_half = 20.0f; // micromolar
const real i_pCa_max = 1.0f; // picoA_per_picoF
const real Km_pCa = 0.5f; // micromolar
const real k_NaCa = 292.8f; // picoA_per_picoF
const real K_mNa = 87500.0f; // micromolar
const real Nao = 140000.0f; // micromolar
const real K_mCa = 1380.0f; // micromolar
const real Cao = 1800.0f; // micromolar
const real k_sat = 0.1f; // dimensionless
const real eta = 0.35f; // dimensionless
const real R = 8.314f; // joule_per_mole_kelvin
const real T = 298.0f; // kelvin
const real g_Cab = 0.000367f; // milliS_per_microF
const real g_Na = 13.0f; // milliS_per_microF
const real Ko = 5400.0f; // micromolar
const real g_Nab = 0.0026f; // milliS_per_microF
const real g_Kto_f = 0.4067f; // milliS_per_microF
const real g_Kto_s = 0.0f; // milliS_per_microF
const real g_Ks = 0.00575f; // milliS_per_microF
const real g_Kur = 0.16f; // milliS_per_microF
const real g_Kss = 0.05f; // milliS_per_microF
const real g_Kr = 0.078f; // milliS_per_microF
const real kf = 0.023761f; // per_millisecond
const real kb = 0.036778f; // per_millisecond
const real i_NaK_max = 0.88f; // picoA_per_picoF
const real Km_Nai = 21000.0f; // micromolar
const real Km_Ko = 1500.0f; // micromolar
const real g_ClCa = 10.0f; // milliS_per_microF
const real Km_Cl = 10.0f; // micromolar
const real E_Cl = -40.0f; // millivolt
// Algebraic Equations
real calc_i_stim = stim_current; //0
real calc_Bi = pow((1.0f+((CMDN_tot*Km_CMDN)/pow((Km_CMDN+Cai_old_),2.0f))),(-1.0f)); //6
real calc_Bss = pow((1.0f+((CMDN_tot*Km_CMDN)/pow((Km_CMDN+Cass_old_),2.0f))),(-1.0f)); //7
real calc_BJSR = pow((1.0f+((CSQN_tot*Km_CSQN)/pow((Km_CSQN+CaJSR_old_),2.0f))),(-1.0f)); //8
real calc_J_rel = (v1*(P_O1_old_+P_O2_old_)*(CaJSR_old_-Cass_old_)*P_RyR_old_); //9
real calc_J_tr = ((CaNSR_old_-CaJSR_old_)/tau_tr); //10
real calc_J_xfer = ((Cass_old_-Cai_old_)/tau_xfer); //11
real calc_J_leak = (v2*(CaNSR_old_-Cai_old_)); //12
real calc_J_up = ((v3*pow(Cai_old_,2.0f))/(pow(Km_up,2.0f)+pow(Cai_old_,2.0f))); //13
real calc_J_trpn = (((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))+(k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_)))-((k_minus_htrpn*HTRPN_Ca_old_)+(k_minus_ltrpn*LTRPN_Ca_old_))); //14
real calc_P_C1 = (1.0f-(P_C2_old_+P_O1_old_+P_O2_old_)); //19
real calc_i_CaL = (g_CaL*O_old_*(V_old_-E_CaL)); //22
real calc_C1 = (1.0f-(O_old_+C2_old_+C3_old_+C4_old_+I1_old_+I2_old_+I3_old_)); //24
real calc_alpha = ((0.4f*exp(((V_old_+12.0f)/10.0f))*((1.0f+(0.7f*exp(((-pow((V_old_+40.0f),2.0f))/10.0f))))-(0.75f*exp(((-pow((V_old_+20.0f),2.0f))/400.0f)))))/(1.0f+(0.12f*exp(((V_old_+12.0f)/10.0f))))); //31
real calc_beta = (0.05f*exp(((-(V_old_+12.0f))/13.0f))); //32
real calc_gamma = ((Kpc_max*Cass_old_)/(Kpc_half+Cass_old_)); //33
real calc_Kpcf = (13.0f*(1.0f-exp(((-pow((V_old_+14.5f),2.0f))/100.0f)))); //34
real calc_i_pCa = ((i_pCa_max*pow(Cai_old_,2.0f))/(pow(Km_pCa,2.0f)+pow(Cai_old_,2.0f))); //35
real calc_i_NaCa = (((((((k_NaCa*1.0f)/(pow(K_mNa,3.0)+pow(Nao,3.0)))*1.0f)/(K_mCa+Cao))*1.0f)/(1.0f+(k_sat*exp((((eta-1.0f)*V_old_*F)/(R*T))))))*((exp(((eta*V_old_*F)/(R*T)))*pow(Nai_old_,3.0)*Cao)-(exp((((eta-1.0f)*V_old_*F)/(R*T)))*pow(Nao,3.0)*Cai_old_))); //36
real calc_E_CaN = (((R*T)/(2.0f*F))*log((Cao/Cai_old_))); //38
real calc_E_Na = (((R*T)/F)*log((((0.9f*Nao)+(0.1f*Ko))/((0.9f*Nai_old_)+(0.1f*Ki_old_))))); //41
real calc_C_Na3 = (1.0f-(O_Na_old_+C_Na1_old_+C_Na2_old_+IF_Na_old_+I1_Na_old_+I2_Na_old_+IC_Na2_old_+IC_Na3_old_)); //42
real calc_alpha_Na11 = (3.802f/((0.1027f*exp(((-(V_old_+2.5f))/17.0f)))+(0.2f*exp(((-(V_old_+2.5f))/150.0f))))); //51
real calc_alpha_Na12 = (3.802f/((0.1027f*exp(((-(V_old_+2.5f))/15.0f)))+(0.23f*exp(((-(V_old_+2.5f))/150.0f))))); //52
real calc_alpha_Na13 = (3.802f/((0.1027f*exp(((-(V_old_+2.5f))/12.0f)))+(0.25f*exp(((-(V_old_+2.5f))/150.0f))))); //53
real calc_beta_Na11 = (0.1917f*exp(((-(V_old_+2.5f))/20.3f))); //54
real calc_beta_Na12 = (0.2f*exp(((-(V_old_-2.5f))/20.3f))); //55
real calc_beta_Na13 = (0.22f*exp(((-(V_old_-7.5f))/20.3f))); //56
real calc_alpha_Na3 = (7e-7f*exp(((-(V_old_+7.0f))/7.7f))); //57
real calc_beta_Na3 = (0.00854f+(0.00002f*V_old_)); //58
real calc_alpha_Na2 = (1.0f/((0.188495f*exp(((-(V_old_+7.0f))/16.6f)))+0.393956f)); //59
real calc_E_K = (((R*T)/F)*log((Ko/Ki_old_))); //68
real calc_alpha_a = (0.18064f*exp((0.03577f*(V_old_+ 30.0f)))); //71
real calc_beta_a = (0.3956f*exp(((-0.06237f)*(V_old_+ 30.0f)))); //72
real calc_alpha_i = ((0.000152f*exp(((-(V_old_+13.5f))/7.0f)))/((0.067083f*exp(((-(V_old_+33.5f))/7.0f)))+1.0f)); //73
real calc_beta_i = ((0.00095f*exp(((V_old_+33.5f)/7.0f)))/((0.051335f*exp(((V_old_+33.5f)/7.0f)))+1.0f)); //74
real calc_ass = (1.0f/(1.0f+exp(((-(V_old_+22.5f))/7.7f)))); //78
real calc_iss = (1.0f/(1.0f+exp(((V_old_+45.2f)/5.7f)))); //79
real calc_tau_ta_s = ((0.493f*exp(((-0.0629f)*V_old_)))+2.058f); //80
real calc_tau_ti_s = (270.0f+(1050.0f/(1.0f+exp(((V_old_+45.2f)/5.7f))))); //81
real calc_alpha_n = (V_old_ != -26.5f)?((0.00000481333f*(V_old_+26.5f))/(1.0f-exp(((-0.128f)*(V_old_+26.5f))))): 0.000037604f; //85
real calc_beta_n = (0.0000953333f*exp(((-0.038f)*(V_old_+26.5f)))); //86
real calc_tau_aur = ((0.493f*exp(((-0.0629f)*V_old_)))+2.058f); //90
real calc_tau_iur = (1200.0f-(170.0f/(1.0f+exp(((V_old_+45.2f)/5.7f))))); //91
real calc_tau_Kss = ((39.3f*exp(((-0.0862f)*V_old_)))+13.17f); //95
real calc_i_Kr = (g_Kr*O_K_old_*(V_old_-(((R*T)/F)*log((((0.98f*Ko)+(0.02f*Nao))/((0.98f*Ki_old_)+(0.02f*Nai_old_))))))); //96
real calc_C_K0 = (1.0f-(C_K1_old_+C_K2_old_+O_K_old_+I_K_old_)); //97
real calc_alpha_a0 = (0.022348f*exp((0.01176f*V_old_))); //102
real calc_beta_a0 = (0.047002f*exp(((-0.0631f)*V_old_))); //103
real calc_alpha_a1 = (0.013733f*exp((0.038198f*V_old_))); //104
real calc_beta_a1 = (0.0000689f*exp(((-0.04178f)*V_old_))); //105
real calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.090821f*exp((0.023391f*(V_old_+5.0f)))); //106
real calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.006497f*exp(((-0.03268f)*(V_old_+5.0f)))); //107
real calc_sigma = ((1.0f/7.0f)*(exp((Nao/67300.0f))-1.0f)); //110
real calc_O_ClCa = (0.2f/(1.0f+exp(((-(V_old_-46.7f))/7.8f)))); //112
real calc_beta_Na2 = ((calc_alpha_Na13*calc_alpha_Na2*calc_alpha_Na3)/(calc_beta_Na13*calc_beta_Na3)); //60
real calc_alpha_Na4 = (calc_alpha_Na2/1000.0f); //61
real calc_beta_Na4 = calc_alpha_Na3; //62
real calc_alpha_Na5 = (calc_alpha_Na2/95000.0f); //63
real calc_beta_Na5 = (calc_alpha_Na3/50.0f); //64
real calc_i_Nab = (g_Nab*(V_old_-calc_E_Na)); //65
real calc_i_Kto_s = (g_Kto_s*ato_s_old_*ito_s_old_*(V_old_-calc_E_K)); //75
real calc_i_K1 = ((((0.2938f*Ko)/(Ko+210.0f))*(V_old_-calc_E_K))/(1.0f+exp((0.0896f*(V_old_-calc_E_K))))); //82
real calc_i_Ks = (g_Ks*pow(nKs_old_,2.0f)*(V_old_-calc_E_K)); //83
real calc_i_Kur = (g_Kur*aur_old_*iur_old_*(V_old_-calc_E_K)); //87
real calc_i_Kss = (g_Kss*aKss_old_*iKss_old_*(V_old_-calc_E_K)); //92
real calc_i_Cab = (g_Cab*(V_old_-calc_E_CaN)); //37
real calc_i_Na = (g_Na*O_Na_old_*(V_old_-calc_E_Na)); //40
real calc_i_Kto_f = (g_Kto_f*pow(ato_f_old_,3.0)*ito_f_old_*(V_old_-calc_E_K)); //67
real calc_f_NaK = (1.0f/(1.0f+(0.1245f*exp((((-0.1f)*V_old_*F)/(R*T))))+(0.0365f*calc_sigma*exp((((-V_old_)*F)/(R*T)))))); //109
real calc_i_ClCa = (((g_ClCa*calc_O_ClCa*Cai_old_)/(Cai_old_+Km_Cl))*(V_old_-E_Cl)); //111
real calc_i_NaK = ((((i_NaK_max*calc_f_NaK*1.0f)/(1.0f+pow((Km_Nai/Nai_old_),1.5)))*Ko)/(Ko+Km_Ko)); //108
// Differential Equations
real d_dt_V = (-(calc_i_CaL+calc_i_pCa+calc_i_NaCa+calc_i_Cab+calc_i_Na+calc_i_Nab+calc_i_NaK+calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kur+calc_i_Kss+calc_i_Kr+calc_i_ClCa+calc_i_stim)); // 1
real d_dt_Cai = (calc_Bi*((calc_J_leak+calc_J_xfer)-(calc_J_up+calc_J_trpn+((((calc_i_Cab+calc_i_pCa)-(2.0f*calc_i_NaCa))*Acap*Cm)/(2.0f*Vmyo*F))))); // 2
real d_dt_Cass = (calc_Bss*(((calc_J_rel*VJSR)/Vss)-(((calc_J_xfer*Vmyo)/Vss)+((calc_i_CaL*Acap*Cm)/(2.0f*Vss*F))))); // 3
real d_dt_CaJSR = (calc_BJSR*(calc_J_tr-calc_J_rel)); // 4
real d_dt_CaNSR = ((((calc_J_up-calc_J_leak)*Vmyo)/VNSR)-((calc_J_tr*VJSR)/VNSR)); // 5
real d_dt_P_RyR = (((-0.04f)*P_RyR_old_)-(((0.1f*calc_i_CaL)/i_CaL_max)*exp(((-pow((V_old_-5.0f),2.0f))/648.0f)))); // 15
real d_dt_LTRPN_Ca = ((k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_))-(k_minus_ltrpn*LTRPN_Ca_old_)); // 16
real d_dt_HTRPN_Ca = ((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))-(k_minus_htrpn*HTRPN_Ca_old_)); // 17
real d_dt_P_O1 = (((k_plus_a*pow(Cass_old_,n)*calc_P_C1)+(k_minus_b*P_O2_old_)+(k_minus_c*P_C2_old_))-((k_minus_a*P_O1_old_)+(k_plus_b*pow(Cass_old_,m)*P_O1_old_)+(k_plus_c*P_O1_old_))); // 18
real d_dt_P_O2 = ((k_plus_b*pow(Cass_old_,m)*P_O1_old_)-(k_minus_b*P_O2_old_)); // 20
real d_dt_P_C2 = ((k_plus_c*P_O1_old_)-(k_minus_c*P_C2_old_)); // 21
real d_dt_O = (((calc_alpha*C4_old_)+(Kpcb*I1_old_)+(0.001f*((calc_alpha*I2_old_)-(calc_Kpcf*O_old_))))-((4.0f*calc_beta*O_old_)+(calc_gamma*O_old_))); // 23
real d_dt_C2 = (((4.0f*calc_alpha*calc_C1)+(2.0f*calc_beta*C3_old_))-((calc_beta*C2_old_)+(3.0f*calc_alpha*C2_old_))); // 25
real d_dt_C3 = (((3.0f*calc_alpha*C2_old_)+(3.0f*calc_beta*C4_old_))-((2.0f*calc_beta*C3_old_)+(2.0f*calc_alpha*C3_old_))); // 26
real d_dt_C4 = (((2.0f*calc_alpha*C3_old_)+(4.0f*calc_beta*O_old_)+(0.01f*((4.0f*Kpcb*calc_beta*I1_old_)-(calc_alpha*calc_gamma*C4_old_)))+(0.002f*((4.0f*calc_beta*I2_old_)-(calc_Kpcf*C4_old_)))+(4.0f*calc_beta*Kpcb*I3_old_))-((3.0f*calc_beta*C4_old_)+(calc_alpha*C4_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))); // 27
real d_dt_I1 = (((calc_gamma*O_old_)+(0.001f*((calc_alpha*I3_old_)-(calc_Kpcf*I1_old_)))+(0.01f*((calc_alpha*calc_gamma*C4_old_)-(4.0f*calc_beta*Kpcb*I1_old_))))-(Kpcb*I1_old_)); // 28
real d_dt_I2 = (((0.001f*((calc_Kpcf*O_old_)-(calc_alpha*I2_old_)))+(Kpcb*I3_old_)+(0.002f*((calc_Kpcf*C4_old_)-(4.0f*calc_beta*I2_old_))))-(calc_gamma*I2_old_)); // 29
real d_dt_I3 = (((0.001f*((calc_Kpcf*I1_old_)-(calc_alpha*I3_old_)))+(calc_gamma*I2_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))-((4.0f*calc_beta*Kpcb*I3_old_)+(Kpcb*I3_old_))); // 30
real d_dt_Nai = (((-(calc_i_Na+calc_i_Nab+(3.0f*calc_i_NaK)+(3.0f*calc_i_NaCa)))*Acap*Cm)/(Vmyo*F)); // 39
real d_dt_C_Na2 = (((calc_alpha_Na11*calc_C_Na3)+(calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na3*IC_Na2_old_))-((calc_beta_Na11*C_Na2_old_)+(calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na3*C_Na2_old_))); // 43
real d_dt_C_Na1 = (((calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na13*O_Na_old_)+(calc_alpha_Na3*IF_Na_old_))-((calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na3*C_Na1_old_))); // 44
real d_dt_O_Na = (((calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na2*IF_Na_old_))-((calc_beta_Na13*O_Na_old_)+(calc_alpha_Na2*O_Na_old_))); // 45
real d_dt_IF_Na = (((calc_alpha_Na2*O_Na_old_)+(calc_beta_Na3*C_Na1_old_)+(calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na12*IC_Na2_old_))-((calc_beta_Na2*IF_Na_old_)+(calc_alpha_Na3*IF_Na_old_)+(calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na12*IF_Na_old_))); // 46
real d_dt_I1_Na = (((calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na5*I2_Na_old_))-((calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na5*I1_Na_old_))); // 47
real d_dt_I2_Na = ((calc_alpha_Na5*I1_Na_old_)-(calc_beta_Na5*I2_Na_old_)); // 48
real d_dt_IC_Na2 = (((calc_alpha_Na11*IC_Na3_old_)+(calc_beta_Na12*IF_Na_old_)+(calc_beta_Na3*C_Na2_old_))-((calc_beta_Na11*IC_Na2_old_)+(calc_alpha_Na12*IC_Na2_old_)+(calc_alpha_Na3*IC_Na2_old_))); // 49
real d_dt_IC_Na3 = (((calc_beta_Na11*IC_Na2_old_)+(calc_beta_Na3*calc_C_Na3))-((calc_alpha_Na11*IC_Na3_old_)+(calc_alpha_Na3*IC_Na3_old_))); // 50
real d_dt_Ki = (((-((calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kss+calc_i_Kur+calc_i_Kr)-(2.0f*calc_i_NaK)))*Acap*Cm)/(Vmyo*F)); // 66
real d_dt_ato_f = ((calc_alpha_a*(1.0f-ato_f_old_))-(calc_beta_a*ato_f_old_)); // 69
real d_dt_ito_f = ((calc_alpha_i*(1.0f-ito_f_old_))-(calc_beta_i*ito_f_old_)); // 70
real d_dt_ato_s = ((calc_ass-ato_s_old_)/calc_tau_ta_s); // 76
real d_dt_ito_s = ((calc_iss-ito_s_old_)/calc_tau_ti_s); // 77
real d_dt_nKs = ((calc_alpha_n*(1.0f-nKs_old_))-(calc_beta_n*nKs_old_)); // 84
real d_dt_aur = ((calc_ass-aur_old_)/calc_tau_aur); // 88
real d_dt_iur = ((calc_iss-iur_old_)/calc_tau_iur); // 89
real d_dt_aKss = ((calc_ass-aKss_old_)/calc_tau_Kss); // 93
real d_dt_iKss = 0.0f; // 94
real d_dt_C_K2 = (((kf*C_K1_old_)+(calc_beta_a1*O_K_old_))-((kb*C_K2_old_)+(calc_alpha_a1*C_K2_old_))); // 98
real d_dt_C_K1 = (((calc_alpha_a0*calc_C_K0)+(kb*C_K2_old_))-((calc_beta_a0*C_K1_old_)+(kf*C_K1_old_))); // 99
real d_dt_O_K = (((calc_alpha_a1*C_K2_old_)+(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_))-((calc_beta_a1*O_K_old_)+(calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_))); // 100
real d_dt_I_K = ((calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_)-(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_)); // 101
rDY[0] = d_dt_V;
rDY[1] = d_dt_Cai;
rDY[2] = d_dt_Cass;
rDY[3] = d_dt_CaJSR;
rDY[4] = d_dt_CaNSR;
rDY[5] = d_dt_P_RyR;
rDY[6] = d_dt_LTRPN_Ca;
rDY[7] = d_dt_HTRPN_Ca;
rDY[8] = d_dt_P_O1;
rDY[9] = d_dt_P_O2;
rDY[10] = d_dt_P_C2;
rDY[11] = d_dt_O;
rDY[12] = d_dt_C2;
rDY[13] = d_dt_C3;
rDY[14] = d_dt_C4;
rDY[15] = d_dt_I1;
rDY[16] = d_dt_I2;
rDY[17] = d_dt_I3;
rDY[18] = d_dt_Nai;
rDY[19] = d_dt_C_Na2;
rDY[20] = d_dt_C_Na1;
rDY[21] = d_dt_O_Na;
rDY[22] = d_dt_IF_Na;
rDY[23] = d_dt_I1_Na;
rDY[24] = d_dt_I2_Na;
rDY[25] = d_dt_IC_Na2;
rDY[26] = d_dt_IC_Na3;
rDY[27] = d_dt_Ki;
rDY[28] = d_dt_ato_f;
rDY[29] = d_dt_ito_f;
rDY[30] = d_dt_ato_s;
rDY[31] = d_dt_ito_s;
rDY[32] = d_dt_nKs;
rDY[33] = d_dt_aur;
rDY[34] = d_dt_iur;
rDY[35] = d_dt_aKss;
rDY[36] = d_dt_iKss;
rDY[37] = d_dt_C_K2;
rDY[38] = d_dt_C_K1;
rDY[39] = d_dt_O_K;
rDY[40] = d_dt_I_K;
}
#include "../default_solvers.cu"
|
2316901dbe0db28081b25bc3ad137c6ded926796.cu
|
#include "../../gpu_utils/gpu_utils.h"
#include <stddef.h>
#include <stdint.h>
#include "bondarenko_2004.h"
__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes, size_t pitch, bool use_adpt_dt, real min_dt) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
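        // sv is a pitched 2-D array: one row per state variable, one column per cell (threadID)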
*((real * )((char *) sv + pitch * 0) + threadID) = -82.4202f; // V millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.115001; // Cai micromolar
*((real * )((char *) sv + pitch * 2) + threadID) = 0.115001; // Cass micromolar
*((real * )((char *) sv + pitch * 3) + threadID) = 1299.5; // CaJSR micromolar
*((real * )((char *) sv + pitch * 4) + threadID) = 1299.5; // CaNSR micromolar
*((real * )((char *) sv + pitch * 5) + threadID) = 0.0; // P_RyR dimensionless
*((real * )((char *) sv + pitch * 6) + threadID) = 11.2684; // LTRPN_Ca micromolar
*((real * )((char *) sv + pitch * 7) + threadID) = 125.29; // HTRPN_Ca micromolar
*((real * )((char *) sv + pitch * 8) + threadID) = 0.149102e-4; // P_O1 dimensionless
*((real * )((char *) sv + pitch * 9) + threadID) = 0.951726e-10; // P_O2 dimensionless
*((real * )((char *) sv + pitch * 10) + threadID) = 0.16774e-3; // P_C2 dimensionless
*((real * )((char *) sv + pitch * 11) + threadID) = 0.930308e-18; // O dimensionless
*((real * )((char *) sv + pitch * 12) + threadID) = 0.124216e-3; // C2 dimensionless
*((real * )((char *) sv + pitch * 13) + threadID) = 0.578679e-8; // C3 dimensionless
*((real * )((char *) sv + pitch * 14) + threadID) = 0.119816e-12; // C4 dimensionless
*((real * )((char *) sv + pitch * 15) + threadID) = 0.497923e-18; // I1 dimensionless
*((real * )((char *) sv + pitch * 16) + threadID) = 0.345847e-13; // I2 dimensionless
*((real * )((char *) sv + pitch * 17) + threadID) = 0.185106e-13; // I3 dimensionless
*((real * )((char *) sv + pitch * 18) + threadID) = 14237.1; // Nai micromolar
*((real * )((char *) sv + pitch * 19) + threadID) = 0.020752; // C_Na2 dimensionless
*((real * )((char *) sv + pitch * 20) + threadID) = 0.279132e-3; // C_Na1 dimensionless
*((real * )((char *) sv + pitch * 21) + threadID) = 0.713483e-6; // O_Na dimensionless
*((real * )((char *) sv + pitch * 22) + threadID) = 0.153176e-3; // IF_Na dimensionless
*((real * )((char *) sv + pitch * 23) + threadID) = 0.673345e-6; // I1_Na dimensionless
*((real * )((char *) sv + pitch * 24) + threadID) = 0.155787e-8; // I2_Na dimensionless
*((real * )((char *) sv + pitch * 25) + threadID) = 0.0113879; // IC_Na2 dimensionless
*((real * )((char *) sv + pitch * 26) + threadID) = 0.34278; // IC_Na3 dimensionless
*((real * )((char *) sv + pitch * 27) + threadID) = 143720.0; // Ki micromolar
*((real * )((char *) sv + pitch * 28) + threadID) = 0.265563e-2; // ato_f dimensionless
*((real * )((char *) sv + pitch * 29) + threadID) = 0.999977; // ito_f dimensionless
*((real * )((char *) sv + pitch * 30) + threadID) = 0.417069e-3; // ato_s dimensionless
*((real * )((char *) sv + pitch * 31) + threadID) = 0.998543; // ito_s dimensionless
*((real * )((char *) sv + pitch * 32) + threadID) = 0.262753e-3; // nKs dimensionless
*((real * )((char *) sv + pitch * 33) + threadID) = 0.417069e-3; // aur dimensionless
*((real * )((char *) sv + pitch * 34) + threadID) = 0.998543; // iur dimensionless
*((real * )((char *) sv + pitch * 35) + threadID) = 0.417069e-3; // aKss dimensionless
*((real * )((char *) sv + pitch * 36) + threadID) = 1.0; // iKss dimensionless
*((real * )((char *) sv + pitch * 37) + threadID) = 0.641229e-3; // C_K2 dimensionless
*((real * )((char *) sv + pitch * 38) + threadID) = 0.992513e-3; // C_K1 dimensionless
*((real * )((char *) sv + pitch * 39) + threadID) = 0.175298e-3; // O_K dimensionless
*((real * )((char *) sv + pitch * 40) + threadID) = 0.319129e-4; // I_K dimensionless
if(use_adpt_dt) {
*((real *)((char *)sv + pitch * NEQ) + threadID) = min_dt; // dt
*((real *)((char *)sv + pitch * (NEQ + 1)) + threadID) = 0.0; // time_new
*((real *)((char *)sv + pitch * (NEQ + 2)) + threadID) = 0.0; // previous dt
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY, real stim_current, int thread_id, real dt, size_t pitch, bool use_adpt_dt) {
// State variables
real V_old_;
real Cai_old_;
real Cass_old_;
real CaJSR_old_;
real CaNSR_old_;
real P_RyR_old_;
real LTRPN_Ca_old_;
real HTRPN_Ca_old_;
real P_O1_old_;
real P_O2_old_;
real P_C2_old_;
real O_old_;
real C2_old_;
real C3_old_;
real C4_old_;
real I1_old_;
real I2_old_;
real I3_old_;
real Nai_old_;
real C_Na2_old_;
real C_Na1_old_;
real O_Na_old_;
real IF_Na_old_;
real I1_Na_old_;
real I2_Na_old_;
real IC_Na2_old_;
real IC_Na3_old_;
real Ki_old_;
real ato_f_old_;
real ito_f_old_;
real ato_s_old_;
real ito_s_old_;
real nKs_old_;
real aur_old_;
real iur_old_;
real aKss_old_;
real iKss_old_;
real C_K2_old_;
real C_K1_old_;
real O_K_old_;
real I_K_old_;
if(use_adpt_dt) {
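        // adaptive-dt path: the state variables are read from a contiguous array;
        // the fixed-dt path below reads the pitched global layout instead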
V_old_ = sv[0]; // initial value = -82.4202 millivolt
Cai_old_ = sv[1]; // initial value = 0.115001 micromolar
Cass_old_ = sv[2]; // initial value = 0.115001 micromolar
CaJSR_old_ = sv[3]; // initial value = 1299.5 micromolar
CaNSR_old_ = sv[4]; // initial value = 1299.5 micromolar
P_RyR_old_ = sv[5]; // initial value = 0 dimensionless
LTRPN_Ca_old_ = sv[6]; // initial value = 11.2684 micromolar
HTRPN_Ca_old_ = sv[7]; // initial value = 125.29 micromolar
P_O1_old_ = sv[8]; // initial value = 0.149102e-4 dimensionless
P_O2_old_ = sv[9]; // initial value = 0.951726e-10 dimensionless
P_C2_old_ = sv[10]; // initial value = 0.16774e-3 dimensionless
O_old_ = sv[11]; // initial value = 0.930308e-18 dimensionless
C2_old_ = sv[12]; // initial value = 0.124216e-3 dimensionless
C3_old_ = sv[13]; // initial value = 0.578679e-8 dimensionless
C4_old_ = sv[14]; // initial value = 0.119816e-12 dimensionless
I1_old_ = sv[15]; // initial value = 0.497923e-18 dimensionless
I2_old_ = sv[16]; // initial value = 0.345847e-13 dimensionless
I3_old_ = sv[17]; // initial value = 0.185106e-13 dimensionless
Nai_old_ = sv[18]; // initial value = 14237.1 micromolar
C_Na2_old_ = sv[19]; // initial value = 0.020752 dimensionless
C_Na1_old_ = sv[20]; // initial value = 0.279132e-3 dimensionless
O_Na_old_ = sv[21]; // initial value = 0.713483e-6 dimensionless
IF_Na_old_ = sv[22]; // initial value = 0.153176e-3 dimensionless
I1_Na_old_ = sv[23]; // initial value = 0.673345e-6 dimensionless
I2_Na_old_ = sv[24]; // initial value = 0.155787e-8 dimensionless
IC_Na2_old_ = sv[25]; // initial value = 0.0113879 dimensionless
IC_Na3_old_ = sv[26]; // initial value = 0.34278 dimensionless
Ki_old_ = sv[27]; // initial value = 143720 micromolar
ato_f_old_ = sv[28]; // initial value = 0.265563e-2 dimensionless
ito_f_old_ = sv[29]; // initial value = 0.999977 dimensionless
ato_s_old_ = sv[30]; // initial value = 0.417069e-3 dimensionless
ito_s_old_ = sv[31]; // initial value = 0.998543 dimensionless
nKs_old_ = sv[32]; // initial value = 0.262753e-3 dimensionless
aur_old_ = sv[33]; // initial value = 0.417069e-3 dimensionless
iur_old_ = sv[34]; // initial value = 0.998543 dimensionless
aKss_old_ = sv[35]; // initial value = 0.417069e-3 dimensionless
iKss_old_ = sv[36]; // initial value = 1 dimensionless
C_K2_old_ = sv[37]; // initial value = 0.641229e-3 dimensionless
C_K1_old_ = sv[38]; // initial value = 0.992513e-3 dimensionless
O_K_old_ = sv[39]; // initial value = 0.175298e-3 dimensionless
I_K_old_ = sv[40]; // initial value = 0.319129e-4 dimensionless
}
else {
V_old_ = *((real *)((char *)sv + pitch * 0) + thread_id); // initial value = -82.4202 millivolt
Cai_old_ = *((real *)((char *)sv + pitch * 1) + thread_id); // initial value = 0.115001 micromolar
Cass_old_ = *((real *)((char *)sv + pitch * 2) + thread_id); // initial value = 0.115001 micromolar
CaJSR_old_ = *((real *)((char *)sv + pitch * 3) + thread_id); // initial value = 1299.5 micromolar
CaNSR_old_ = *((real *)((char *)sv + pitch * 4) + thread_id); // initial value = 1299.5 micromolar
P_RyR_old_ = *((real *)((char *)sv + pitch * 5) + thread_id); // initial value = 0 dimensionless
LTRPN_Ca_old_ = *((real *)((char *)sv + pitch * 6) + thread_id); // initial value = 11.2684 micromolar
HTRPN_Ca_old_ = *((real *)((char *)sv + pitch * 7) + thread_id); // initial value = 125.29 micromolar
P_O1_old_ = *((real *)((char *)sv + pitch * 8) + thread_id); // initial value = 0.149102e-4 dimensionless
P_O2_old_ = *((real *)((char *)sv + pitch * 9) + thread_id); // initial value = 0.951726e-10 dimensionless
P_C2_old_ = *((real *)((char *)sv + pitch * 10) + thread_id); // initial value = 0.16774e-3 dimensionless
O_old_ = *((real *)((char *)sv + pitch * 11) + thread_id); // initial value = 0.930308e-18 dimensionless
C2_old_ = *((real *)((char *)sv + pitch * 12) + thread_id); // initial value = 0.124216e-3 dimensionless
C3_old_ = *((real *)((char *)sv + pitch * 13) + thread_id); // initial value = 0.578679e-8 dimensionless
C4_old_ = *((real *)((char *)sv + pitch * 14) + thread_id); // initial value = 0.119816e-12 dimensionless
I1_old_ = *((real *)((char *)sv + pitch * 15) + thread_id); // initial value = 0.497923e-18 dimensionless
I2_old_ = *((real *)((char *)sv + pitch * 16) + thread_id); // initial value = 0.345847e-13 dimensionless
I3_old_ = *((real *)((char *)sv + pitch * 17) + thread_id); // initial value = 0.185106e-13 dimensionless
Nai_old_ = *((real *)((char *)sv + pitch * 18) + thread_id); // initial value = 14237.1 micromolar
C_Na2_old_ = *((real *)((char *)sv + pitch * 19) + thread_id); // initial value = 0.020752 dimensionless
C_Na1_old_ = *((real *)((char *)sv + pitch * 20) + thread_id); // initial value = 0.279132e-3 dimensionless
O_Na_old_ = *((real *)((char *)sv + pitch * 21) + thread_id); // initial value = 0.713483e-6 dimensionless
IF_Na_old_ = *((real *)((char *)sv + pitch * 22) + thread_id); // initial value = 0.153176e-3 dimensionless
I1_Na_old_ = *((real *)((char *)sv + pitch * 23) + thread_id); // initial value = 0.673345e-6 dimensionless
I2_Na_old_ = *((real *)((char *)sv + pitch * 24) + thread_id); // initial value = 0.155787e-8 dimensionless
IC_Na2_old_ = *((real *)((char *)sv + pitch * 25) + thread_id); // initial value = 0.0113879 dimensionless
IC_Na3_old_ = *((real *)((char *)sv + pitch * 26) + thread_id); // initial value = 0.34278 dimensionless
Ki_old_ = *((real *)((char *)sv + pitch * 27) + thread_id); // initial value = 143720 micromolar
ato_f_old_ = *((real *)((char *)sv + pitch * 28) + thread_id); // initial value = 0.265563e-2 dimensionless
ito_f_old_ = *((real *)((char *)sv + pitch * 29) + thread_id); // initial value = 0.999977 dimensionless
ato_s_old_ = *((real *)((char *)sv + pitch * 30) + thread_id); // initial value = 0.417069e-3 dimensionless
ito_s_old_ = *((real *)((char *)sv + pitch * 31) + thread_id); // initial value = 0.998543 dimensionless
nKs_old_ = *((real *)((char *)sv + pitch * 32) + thread_id); // initial value = 0.262753e-3 dimensionless
aur_old_ = *((real *)((char *)sv + pitch * 33) + thread_id); // initial value = 0.417069e-3 dimensionless
iur_old_ = *((real *)((char *)sv + pitch * 34) + thread_id); // initial value = 0.998543 dimensionless
aKss_old_ = *((real *)((char *)sv + pitch * 35) + thread_id); // initial value = 0.417069e-3 dimensionless
iKss_old_ = *((real *)((char *)sv + pitch * 36) + thread_id); // initial value = 1 dimensionless
C_K2_old_ = *((real *)((char *)sv + pitch * 37) + thread_id); // initial value = 0.641229e-3 dimensionless
C_K1_old_ = *((real *)((char *)sv + pitch * 38) + thread_id); // initial value = 0.992513e-3 dimensionless
O_K_old_ = *((real *)((char *)sv + pitch * 39) + thread_id); // initial value = 0.175298e-3 dimensionless
I_K_old_ = *((real *)((char *)sv + pitch * 40) + thread_id); // initial value = 0.319129e-4 dimensionless
}
// Parameters
const real Acap = 1.534e-4f; // cm2
const real Cm = 1.0f; // microF_per_cm2
const real Vmyo = 25.84e-6f; // microlitre
const real F = 96.5f; // coulomb_per_millimole
const real VJSR = 0.12e-6f; // microlitre
const real Vss = 1.485e-9f; // microlitre
const real VNSR = 2.098e-6f; // microlitre
const real CMDN_tot = 50.0f; // micromolar
const real Km_CMDN = 0.238f; // micromolar
const real CSQN_tot = 15000.0f; // micromolar
const real Km_CSQN = 800.0f; // micromolar
const real v1 = 4.5f; // per_millisecond
const real tau_tr = 20.0f; // millisecond
const real tau_xfer = 8.0f; // millisecond
const real v2 = 1.74e-5f; // per_millisecond
const real v3 = 0.45f; // micromolar_per_millisecond
const real Km_up = 0.5f; // micromolar
const real k_plus_htrpn = 0.00237f; // per_micromolar_millisecond
const real HTRPN_tot = 140.0f; // micromolar
const real k_plus_ltrpn = 0.0327f; // per_micromolar_millisecond
const real LTRPN_tot = 70.0f; // micromolar
const real k_minus_htrpn = 3.2e-5f; // per_millisecond
const real k_minus_ltrpn = 0.0196f; // per_millisecond
const real i_CaL_max = 7.0f; // picoA_per_picoF
const real k_plus_a = 0.006075f; // micromolar4_per_millisecond
const real n = 4.0f; // dimensionless
const real k_minus_b = 0.965f; // per_millisecond
const real k_minus_c = 0.0008f; // per_millisecond
const real k_minus_a = 0.07125f; // per_millisecond
const real k_plus_b = 0.00405f; // micromolar3_per_millisecond
const real m = 3.0f; // dimensionless
const real k_plus_c = 0.009f; // per_millisecond
const real g_CaL = 0.1729f; // milliS_per_microF
const real E_CaL = 63.0f; // millivolt
const real Kpcb = 0.0005f; // per_millisecond
const real Kpc_max = 0.23324f; // per_millisecond
const real Kpc_half = 20.0f; // micromolar
const real i_pCa_max = 1.0f; // picoA_per_picoF
const real Km_pCa = 0.5f; // micromolar
const real k_NaCa = 292.8f; // picoA_per_picoF
const real K_mNa = 87500.0f; // micromolar
const real Nao = 140000.0f; // micromolar
const real K_mCa = 1380.0f; // micromolar
const real Cao = 1800.0f; // micromolar
const real k_sat = 0.1f; // dimensionless
const real eta = 0.35f; // dimensionless
const real R = 8.314f; // joule_per_mole_kelvin
const real T = 298.0f; // kelvin
const real g_Cab = 0.000367f; // milliS_per_microF
const real g_Na = 13.0f; // milliS_per_microF
const real Ko = 5400.0f; // micromolar
const real g_Nab = 0.0026f; // milliS_per_microF
const real g_Kto_f = 0.4067f; // milliS_per_microF
const real g_Kto_s = 0.0f; // milliS_per_microF
const real g_Ks = 0.00575f; // milliS_per_microF
const real g_Kur = 0.16f; // milliS_per_microF
const real g_Kss = 0.05f; // milliS_per_microF
const real g_Kr = 0.078f; // milliS_per_microF
const real kf = 0.023761f; // per_millisecond
const real kb = 0.036778f; // per_millisecond
const real i_NaK_max = 0.88f; // picoA_per_picoF
const real Km_Nai = 21000.0f; // micromolar
const real Km_Ko = 1500.0f; // micromolar
const real g_ClCa = 10.0f; // milliS_per_microF
const real Km_Cl = 10.0f; // micromolar
const real E_Cl = -40.0f; // millivolt
// Algebraic Equations
real calc_i_stim = stim_current; //0
real calc_Bi = pow((1.0f+((CMDN_tot*Km_CMDN)/pow((Km_CMDN+Cai_old_),2.0f))),(-1.0f)); //6
real calc_Bss = pow((1.0f+((CMDN_tot*Km_CMDN)/pow((Km_CMDN+Cass_old_),2.0f))),(-1.0f)); //7
real calc_BJSR = pow((1.0f+((CSQN_tot*Km_CSQN)/pow((Km_CSQN+CaJSR_old_),2.0f))),(-1.0f)); //8
real calc_J_rel = (v1*(P_O1_old_+P_O2_old_)*(CaJSR_old_-Cass_old_)*P_RyR_old_); //9
real calc_J_tr = ((CaNSR_old_-CaJSR_old_)/tau_tr); //10
real calc_J_xfer = ((Cass_old_-Cai_old_)/tau_xfer); //11
real calc_J_leak = (v2*(CaNSR_old_-Cai_old_)); //12
real calc_J_up = ((v3*pow(Cai_old_,2.0f))/(pow(Km_up,2.0f)+pow(Cai_old_,2.0f))); //13
real calc_J_trpn = (((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))+(k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_)))-((k_minus_htrpn*HTRPN_Ca_old_)+(k_minus_ltrpn*LTRPN_Ca_old_))); //14
real calc_P_C1 = (1.0f-(P_C2_old_+P_O1_old_+P_O2_old_)); //19
real calc_i_CaL = (g_CaL*O_old_*(V_old_-E_CaL)); //22
real calc_C1 = (1.0f-(O_old_+C2_old_+C3_old_+C4_old_+I1_old_+I2_old_+I3_old_)); //24
real calc_alpha = ((0.4f*exp(((V_old_+12.0f)/10.0f))*((1.0f+(0.7f*exp(((-pow((V_old_+40.0f),2.0f))/10.0f))))-(0.75f*exp(((-pow((V_old_+20.0f),2.0f))/400.0f)))))/(1.0f+(0.12f*exp(((V_old_+12.0f)/10.0f))))); //31
real calc_beta = (0.05f*exp(((-(V_old_+12.0f))/13.0f))); //32
real calc_gamma = ((Kpc_max*Cass_old_)/(Kpc_half+Cass_old_)); //33
real calc_Kpcf = (13.0f*(1.0f-exp(((-pow((V_old_+14.5f),2.0f))/100.0f)))); //34
real calc_i_pCa = ((i_pCa_max*pow(Cai_old_,2.0f))/(pow(Km_pCa,2.0f)+pow(Cai_old_,2.0f))); //35
real calc_i_NaCa = (((((((k_NaCa*1.0f)/(pow(K_mNa,3.0)+pow(Nao,3.0)))*1.0f)/(K_mCa+Cao))*1.0f)/(1.0f+(k_sat*exp((((eta-1.0f)*V_old_*F)/(R*T))))))*((exp(((eta*V_old_*F)/(R*T)))*pow(Nai_old_,3.0)*Cao)-(exp((((eta-1.0f)*V_old_*F)/(R*T)))*pow(Nao,3.0)*Cai_old_))); //36
real calc_E_CaN = (((R*T)/(2.0f*F))*log((Cao/Cai_old_))); //38
real calc_E_Na = (((R*T)/F)*log((((0.9f*Nao)+(0.1f*Ko))/((0.9f*Nai_old_)+(0.1f*Ki_old_))))); //41
real calc_C_Na3 = (1.0f-(O_Na_old_+C_Na1_old_+C_Na2_old_+IF_Na_old_+I1_Na_old_+I2_Na_old_+IC_Na2_old_+IC_Na3_old_)); //42
real calc_alpha_Na11 = (3.802f/((0.1027f*exp(((-(V_old_+2.5f))/17.0f)))+(0.2f*exp(((-(V_old_+2.5f))/150.0f))))); //51
real calc_alpha_Na12 = (3.802f/((0.1027f*exp(((-(V_old_+2.5f))/15.0f)))+(0.23f*exp(((-(V_old_+2.5f))/150.0f))))); //52
real calc_alpha_Na13 = (3.802f/((0.1027f*exp(((-(V_old_+2.5f))/12.0f)))+(0.25f*exp(((-(V_old_+2.5f))/150.0f))))); //53
real calc_beta_Na11 = (0.1917f*exp(((-(V_old_+2.5f))/20.3f))); //54
real calc_beta_Na12 = (0.2f*exp(((-(V_old_-2.5f))/20.3f))); //55
real calc_beta_Na13 = (0.22f*exp(((-(V_old_-7.5f))/20.3f))); //56
real calc_alpha_Na3 = (7e-7f*exp(((-(V_old_+7.0f))/7.7f))); //57
real calc_beta_Na3 = (0.00854f+(0.00002f*V_old_)); //58
real calc_alpha_Na2 = (1.0f/((0.188495f*exp(((-(V_old_+7.0f))/16.6f)))+0.393956f)); //59
real calc_E_K = (((R*T)/F)*log((Ko/Ki_old_))); //68
real calc_alpha_a = (0.18064f*exp((0.03577f*(V_old_+ 30.0f)))); //71
real calc_beta_a = (0.3956f*exp(((-0.06237f)*(V_old_+ 30.0f)))); //72
real calc_alpha_i = ((0.000152f*exp(((-(V_old_+13.5f))/7.0f)))/((0.067083f*exp(((-(V_old_+33.5f))/7.0f)))+1.0f)); //73
real calc_beta_i = ((0.00095f*exp(((V_old_+33.5f)/7.0f)))/((0.051335f*exp(((V_old_+33.5f)/7.0f)))+1.0f)); //74
real calc_ass = (1.0f/(1.0f+exp(((-(V_old_+22.5f))/7.7f)))); //78
real calc_iss = (1.0f/(1.0f+exp(((V_old_+45.2f)/5.7f)))); //79
real calc_tau_ta_s = ((0.493f*exp(((-0.0629f)*V_old_)))+2.058f); //80
real calc_tau_ti_s = (270.0f+(1050.0f/(1.0f+exp(((V_old_+45.2f)/5.7f))))); //81
real calc_alpha_n = (V_old_ != -26.5f)?((0.00000481333f*(V_old_+26.5f))/(1.0f-exp(((-0.128f)*(V_old_+26.5f))))): 0.000037604f; //85
real calc_beta_n = (0.0000953333f*exp(((-0.038f)*(V_old_+26.5f)))); //86
real calc_tau_aur = ((0.493f*exp(((-0.0629f)*V_old_)))+2.058f); //90
real calc_tau_iur = (1200.0f-(170.0f/(1.0f+exp(((V_old_+45.2f)/5.7f))))); //91
real calc_tau_Kss = ((39.3f*exp(((-0.0862f)*V_old_)))+13.17f); //95
real calc_i_Kr = (g_Kr*O_K_old_*(V_old_-(((R*T)/F)*log((((0.98f*Ko)+(0.02f*Nao))/((0.98f*Ki_old_)+(0.02f*Nai_old_))))))); //96
real calc_C_K0 = (1.0f-(C_K1_old_+C_K2_old_+O_K_old_+I_K_old_)); //97
real calc_alpha_a0 = (0.022348f*exp((0.01176f*V_old_))); //102
real calc_beta_a0 = (0.047002f*exp(((-0.0631f)*V_old_))); //103
real calc_alpha_a1 = (0.013733f*exp((0.038198f*V_old_))); //104
real calc_beta_a1 = (0.0000689f*exp(((-0.04178f)*V_old_))); //105
real calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.090821f*exp((0.023391f*(V_old_+5.0f)))); //106
real calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.006497f*exp(((-0.03268f)*(V_old_+5.0f)))); //107
real calc_sigma = ((1.0f/7.0f)*(exp((Nao/67300.0f))-1.0f)); //110
real calc_O_ClCa = (0.2f/(1.0f+exp(((-(V_old_-46.7f))/7.8f)))); //112
real calc_beta_Na2 = ((calc_alpha_Na13*calc_alpha_Na2*calc_alpha_Na3)/(calc_beta_Na13*calc_beta_Na3)); //60
real calc_alpha_Na4 = (calc_alpha_Na2/1000.0f); //61
real calc_beta_Na4 = calc_alpha_Na3; //62
real calc_alpha_Na5 = (calc_alpha_Na2/95000.0f); //63
real calc_beta_Na5 = (calc_alpha_Na3/50.0f); //64
real calc_i_Nab = (g_Nab*(V_old_-calc_E_Na)); //65
real calc_i_Kto_s = (g_Kto_s*ato_s_old_*ito_s_old_*(V_old_-calc_E_K)); //75
real calc_i_K1 = ((((0.2938f*Ko)/(Ko+210.0f))*(V_old_-calc_E_K))/(1.0f+exp((0.0896f*(V_old_-calc_E_K))))); //82
real calc_i_Ks = (g_Ks*pow(nKs_old_,2.0f)*(V_old_-calc_E_K)); //83
real calc_i_Kur = (g_Kur*aur_old_*iur_old_*(V_old_-calc_E_K)); //87
real calc_i_Kss = (g_Kss*aKss_old_*iKss_old_*(V_old_-calc_E_K)); //92
real calc_i_Cab = (g_Cab*(V_old_-calc_E_CaN)); //37
real calc_i_Na = (g_Na*O_Na_old_*(V_old_-calc_E_Na)); //40
real calc_i_Kto_f = (g_Kto_f*pow(ato_f_old_,3.0)*ito_f_old_*(V_old_-calc_E_K)); //67
real calc_f_NaK = (1.0f/(1.0f+(0.1245f*exp((((-0.1f)*V_old_*F)/(R*T))))+(0.0365f*calc_sigma*exp((((-V_old_)*F)/(R*T)))))); //109
real calc_i_ClCa = (((g_ClCa*calc_O_ClCa*Cai_old_)/(Cai_old_+Km_Cl))*(V_old_-E_Cl)); //111
real calc_i_NaK = ((((i_NaK_max*calc_f_NaK*1.0f)/(1.0f+pow((Km_Nai/Nai_old_),1.5)))*Ko)/(Ko+Km_Ko)); //108
// Differential Equations
real d_dt_V = (-(calc_i_CaL+calc_i_pCa+calc_i_NaCa+calc_i_Cab+calc_i_Na+calc_i_Nab+calc_i_NaK+calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kur+calc_i_Kss+calc_i_Kr+calc_i_ClCa+calc_i_stim)); // 1
real d_dt_Cai = (calc_Bi*((calc_J_leak+calc_J_xfer)-(calc_J_up+calc_J_trpn+((((calc_i_Cab+calc_i_pCa)-(2.0f*calc_i_NaCa))*Acap*Cm)/(2.0f*Vmyo*F))))); // 2
real d_dt_Cass = (calc_Bss*(((calc_J_rel*VJSR)/Vss)-(((calc_J_xfer*Vmyo)/Vss)+((calc_i_CaL*Acap*Cm)/(2.0f*Vss*F))))); // 3
real d_dt_CaJSR = (calc_BJSR*(calc_J_tr-calc_J_rel)); // 4
real d_dt_CaNSR = ((((calc_J_up-calc_J_leak)*Vmyo)/VNSR)-((calc_J_tr*VJSR)/VNSR)); // 5
real d_dt_P_RyR = (((-0.04f)*P_RyR_old_)-(((0.1f*calc_i_CaL)/i_CaL_max)*exp(((-pow((V_old_-5.0f),2.0f))/648.0f)))); // 15
real d_dt_LTRPN_Ca = ((k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_))-(k_minus_ltrpn*LTRPN_Ca_old_)); // 16
real d_dt_HTRPN_Ca = ((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))-(k_minus_htrpn*HTRPN_Ca_old_)); // 17
real d_dt_P_O1 = (((k_plus_a*pow(Cass_old_,n)*calc_P_C1)+(k_minus_b*P_O2_old_)+(k_minus_c*P_C2_old_))-((k_minus_a*P_O1_old_)+(k_plus_b*pow(Cass_old_,m)*P_O1_old_)+(k_plus_c*P_O1_old_))); // 18
real d_dt_P_O2 = ((k_plus_b*pow(Cass_old_,m)*P_O1_old_)-(k_minus_b*P_O2_old_)); // 20
real d_dt_P_C2 = ((k_plus_c*P_O1_old_)-(k_minus_c*P_C2_old_)); // 21
real d_dt_O = (((calc_alpha*C4_old_)+(Kpcb*I1_old_)+(0.001f*((calc_alpha*I2_old_)-(calc_Kpcf*O_old_))))-((4.0f*calc_beta*O_old_)+(calc_gamma*O_old_))); // 23
real d_dt_C2 = (((4.0f*calc_alpha*calc_C1)+(2.0f*calc_beta*C3_old_))-((calc_beta*C2_old_)+(3.0f*calc_alpha*C2_old_))); // 25
real d_dt_C3 = (((3.0f*calc_alpha*C2_old_)+(3.0f*calc_beta*C4_old_))-((2.0f*calc_beta*C3_old_)+(2.0f*calc_alpha*C3_old_))); // 26
real d_dt_C4 = (((2.0f*calc_alpha*C3_old_)+(4.0f*calc_beta*O_old_)+(0.01f*((4.0f*Kpcb*calc_beta*I1_old_)-(calc_alpha*calc_gamma*C4_old_)))+(0.002f*((4.0f*calc_beta*I2_old_)-(calc_Kpcf*C4_old_)))+(4.0f*calc_beta*Kpcb*I3_old_))-((3.0f*calc_beta*C4_old_)+(calc_alpha*C4_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))); // 27
real d_dt_I1 = (((calc_gamma*O_old_)+(0.001f*((calc_alpha*I3_old_)-(calc_Kpcf*I1_old_)))+(0.01f*((calc_alpha*calc_gamma*C4_old_)-(4.0f*calc_beta*Kpcb*I1_old_))))-(Kpcb*I1_old_)); // 28
real d_dt_I2 = (((0.001f*((calc_Kpcf*O_old_)-(calc_alpha*I2_old_)))+(Kpcb*I3_old_)+(0.002f*((calc_Kpcf*C4_old_)-(4.0f*calc_beta*I2_old_))))-(calc_gamma*I2_old_)); // 29
real d_dt_I3 = (((0.001f*((calc_Kpcf*I1_old_)-(calc_alpha*I3_old_)))+(calc_gamma*I2_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))-((4.0f*calc_beta*Kpcb*I3_old_)+(Kpcb*I3_old_))); // 30
real d_dt_Nai = (((-(calc_i_Na+calc_i_Nab+(3.0f*calc_i_NaK)+(3.0f*calc_i_NaCa)))*Acap*Cm)/(Vmyo*F)); // 39
real d_dt_C_Na2 = (((calc_alpha_Na11*calc_C_Na3)+(calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na3*IC_Na2_old_))-((calc_beta_Na11*C_Na2_old_)+(calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na3*C_Na2_old_))); // 43
real d_dt_C_Na1 = (((calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na13*O_Na_old_)+(calc_alpha_Na3*IF_Na_old_))-((calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na3*C_Na1_old_))); // 44
real d_dt_O_Na = (((calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na2*IF_Na_old_))-((calc_beta_Na13*O_Na_old_)+(calc_alpha_Na2*O_Na_old_))); // 45
real d_dt_IF_Na = (((calc_alpha_Na2*O_Na_old_)+(calc_beta_Na3*C_Na1_old_)+(calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na12*IC_Na2_old_))-((calc_beta_Na2*IF_Na_old_)+(calc_alpha_Na3*IF_Na_old_)+(calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na12*IF_Na_old_))); // 46
real d_dt_I1_Na = (((calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na5*I2_Na_old_))-((calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na5*I1_Na_old_))); // 47
real d_dt_I2_Na = ((calc_alpha_Na5*I1_Na_old_)-(calc_beta_Na5*I2_Na_old_)); // 48
real d_dt_IC_Na2 = (((calc_alpha_Na11*IC_Na3_old_)+(calc_beta_Na12*IF_Na_old_)+(calc_beta_Na3*C_Na2_old_))-((calc_beta_Na11*IC_Na2_old_)+(calc_alpha_Na12*IC_Na2_old_)+(calc_alpha_Na3*IC_Na2_old_))); // 49
real d_dt_IC_Na3 = (((calc_beta_Na11*IC_Na2_old_)+(calc_beta_Na3*calc_C_Na3))-((calc_alpha_Na11*IC_Na3_old_)+(calc_alpha_Na3*IC_Na3_old_))); // 50
real d_dt_Ki = (((-((calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kss+calc_i_Kur+calc_i_Kr)-(2.0f*calc_i_NaK)))*Acap*Cm)/(Vmyo*F)); // 66
real d_dt_ato_f = ((calc_alpha_a*(1.0f-ato_f_old_))-(calc_beta_a*ato_f_old_)); // 69
real d_dt_ito_f = ((calc_alpha_i*(1.0f-ito_f_old_))-(calc_beta_i*ito_f_old_)); // 70
real d_dt_ato_s = ((calc_ass-ato_s_old_)/calc_tau_ta_s); // 76
real d_dt_ito_s = ((calc_iss-ito_s_old_)/calc_tau_ti_s); // 77
real d_dt_nKs = ((calc_alpha_n*(1.0f-nKs_old_))-(calc_beta_n*nKs_old_)); // 84
real d_dt_aur = ((calc_ass-aur_old_)/calc_tau_aur); // 88
real d_dt_iur = ((calc_iss-iur_old_)/calc_tau_iur); // 89
real d_dt_aKss = ((calc_ass-aKss_old_)/calc_tau_Kss); // 93
real d_dt_iKss = 0.0f; // 94
real d_dt_C_K2 = (((kf*C_K1_old_)+(calc_beta_a1*O_K_old_))-((kb*C_K2_old_)+(calc_alpha_a1*C_K2_old_))); // 98
real d_dt_C_K1 = (((calc_alpha_a0*calc_C_K0)+(kb*C_K2_old_))-((calc_beta_a0*C_K1_old_)+(kf*C_K1_old_))); // 99
real d_dt_O_K = (((calc_alpha_a1*C_K2_old_)+(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_))-((calc_beta_a1*O_K_old_)+(calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_))); // 100
real d_dt_I_K = ((calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_)-(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_)); // 101
rDY[0] = d_dt_V;
rDY[1] = d_dt_Cai;
rDY[2] = d_dt_Cass;
rDY[3] = d_dt_CaJSR;
rDY[4] = d_dt_CaNSR;
rDY[5] = d_dt_P_RyR;
rDY[6] = d_dt_LTRPN_Ca;
rDY[7] = d_dt_HTRPN_Ca;
rDY[8] = d_dt_P_O1;
rDY[9] = d_dt_P_O2;
rDY[10] = d_dt_P_C2;
rDY[11] = d_dt_O;
rDY[12] = d_dt_C2;
rDY[13] = d_dt_C3;
rDY[14] = d_dt_C4;
rDY[15] = d_dt_I1;
rDY[16] = d_dt_I2;
rDY[17] = d_dt_I3;
rDY[18] = d_dt_Nai;
rDY[19] = d_dt_C_Na2;
rDY[20] = d_dt_C_Na1;
rDY[21] = d_dt_O_Na;
rDY[22] = d_dt_IF_Na;
rDY[23] = d_dt_I1_Na;
rDY[24] = d_dt_I2_Na;
rDY[25] = d_dt_IC_Na2;
rDY[26] = d_dt_IC_Na3;
rDY[27] = d_dt_Ki;
rDY[28] = d_dt_ato_f;
rDY[29] = d_dt_ito_f;
rDY[30] = d_dt_ato_s;
rDY[31] = d_dt_ito_s;
rDY[32] = d_dt_nKs;
rDY[33] = d_dt_aur;
rDY[34] = d_dt_iur;
rDY[35] = d_dt_aKss;
rDY[36] = d_dt_iKss;
rDY[37] = d_dt_C_K2;
rDY[38] = d_dt_C_K1;
rDY[39] = d_dt_O_K;
rDY[40] = d_dt_I_K;
}
#include "../default_solvers.cu"
|
4f29ac5128d9f0afda6a9526f7bf17655edd523c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "sgp4CUDA.cuh"
#include "commonCUDA.cuh"
#include "tle.h"
#include "sgp4initKernel.cu"
#include "sgp4Kernel.cu"
t_var currenttime = 0.0;
struct satelliterecord_soa_t *d_satrec, *h_satrec;
void initSGP4CUDA( gravconsttype whichconst, std::vector<satelliterecord_aos_t> &SatRecAoS, int numberSatellites ){
//Set Gravitational Constants
gravconstant_t gravconstant;
setGravConstant(whichconst, gravconstant);
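    // copy the constants into the device-side 'gravity_constants' symbol
    // (note: the symbol is passed here by its legacy string name)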
hipMemcpyToSymbol("gravity_constants", &gravconstant, sizeof(gravconstant_t), 0, hipMemcpyHostToDevice);
satelliterecord_soa_t *SatRecSoA = (satelliterecord_soa_t*) malloc(sizeof(satelliterecord_soa_t) * numberSatellites);
satelliteRecordConvert(SatRecAoS, SatRecSoA);
cutilSafeCall(hipMalloc((void **) &d_satrec, sizeof(satelliterecord_soa_t) * numberSatellites));
cutilSafeCall(hipMemcpy(d_satrec, SatRecSoA, sizeof(satelliterecord_soa_t) * numberSatellites, hipMemcpyHostToDevice));
free(SatRecSoA);
dim3 threadsperblock( NUM_THREADS , 1 );
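    // ceiling division: enough blocks to cover all satellites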
dim3 blockspergrid( numberSatellites / NUM_THREADS + (!(numberSatellites % NUM_THREADS) ? 0 : 1), 1 , 1);
hipLaunchKernelGGL(( sgp4initkernel), dim3(blockspergrid), dim3(threadsperblock) , 0, 0, d_satrec, numberSatellites);
hipError_t STATUS = hipGetLastError();
}
void ComputeSGP4CUDA( float4 *positions, t_var deltatime, int numberSatellites ){
currenttime += deltatime;
dim3 threadsperblock( NUM_THREADS , 1 );
dim3 blockspergrid( numberSatellites / NUM_THREADS + (!(numberSatellites % NUM_THREADS) ? 0 : 1), 1 , 1);
hipLaunchKernelGGL(( sgp4), dim3(blockspergrid), dim3(threadsperblock) , 0, 0, d_satrec, numberSatellites, currenttime, positions);
}
void FreeVariables(){
cutilSafeCall(hipFree(d_satrec));
}
|
4f29ac5128d9f0afda6a9526f7bf17655edd523c.cu
|
//#include "sgp4CUDA.cuh"
#include "commonCUDA.cuh"
#include "tle.h"
#include "sgp4initKernel.cu"
#include "sgp4Kernel.cu"
t_var currenttime = 0.0;
struct satelliterecord_soa_t *d_satrec, *h_satrec;
void initSGP4CUDA( gravconsttype whichconst, std::vector<satelliterecord_aos_t> &SatRecAoS, int numberSatellites ){
//Set Gravitational Constants
gravconstant_t gravconstant;
setGravConstant(whichconst, gravconstant);
cudaMemcpyToSymbol("gravity_constants", &gravconstant, sizeof(gravconstant_t), 0, cudaMemcpyHostToDevice);
satelliterecord_soa_t *SatRecSoA = (satelliterecord_soa_t*) malloc(sizeof(satelliterecord_soa_t) * numberSatellites);
satelliteRecordConvert(SatRecAoS, SatRecSoA);
cutilSafeCall(cudaMalloc((void **) &d_satrec, sizeof(satelliterecord_soa_t) * numberSatellites));
cutilSafeCall(cudaMemcpy(d_satrec, SatRecSoA, sizeof(satelliterecord_soa_t) * numberSatellites, cudaMemcpyHostToDevice));
free(SatRecSoA);
dim3 threadsperblock( NUM_THREADS , 1 );
dim3 blockspergrid( numberSatellites / NUM_THREADS + (!(numberSatellites % NUM_THREADS) ? 0 : 1), 1 , 1);
sgp4initkernel<<< blockspergrid, threadsperblock >>>(d_satrec, numberSatellites);
cudaError_t STATUS = cudaGetLastError();
}
void ComputeSGP4CUDA( float4 *positions, t_var deltatime, int numberSatellites ){
currenttime += deltatime;
dim3 threadsperblock( NUM_THREADS , 1 );
dim3 blockspergrid( numberSatellites / NUM_THREADS + (!(numberSatellites % NUM_THREADS) ? 0 : 1), 1 , 1);
sgp4<<< blockspergrid, threadsperblock >>>(d_satrec, numberSatellites, currenttime, positions);
}
void FreeVariables(){
cutilSafeCall(cudaFree(d_satrec));
}
|
cccf980c77103cf64f7f114377fb6a5f662f62d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include "triangle.cuh"
#include "slicer.cuh"
#include "golden.cuh"
#include <vector>
#include <chrono>
#define NOW (std::chrono::high_resolution_clock::now())
typedef std::chrono::time_point<std::chrono::high_resolution_clock> chrono_t;
void timer_checkpoint(chrono_t & checkpoint) {
#ifdef TEST
chrono_t end = NOW;
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - checkpoint);
std::cout << duration.count() << "ms" << std::endl;
checkpoint = end;
#else
std::cout << std::endl;
#endif
}
void checkCudaError() {
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
}
int main(int argc, char* argv[]) {
std::string stl_file_name;
std::vector<triangle> triangles;
std::vector<std::vector<double>> point_array(9);
if (argc == 2) {
stl_file_name = argv[1];
} else if (argc > 2) {
std::cout << "ERROR: Too many command line arguments" << std::endl;
}
chrono_t start = NOW;
load_point_array(stl_file_name, point_array, triangles);
std::cout << "Reading STL file... ";
timer_checkpoint(start);
std::cout << "Allocating device memory... ";
int num_triangles = triangles.size();
triangle* triangles_dev;
double* points_dev;
// all[z][y][x]
#ifdef TEST
bool* all = (bool*)malloc(NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool));
#else
bool* all = (bool*)malloc(BLOCK_HEIGHT * Y_DIM * X_DIM * sizeof(bool));
#endif
bool* all_dev;
size_t size = BLOCK_HEIGHT * Y_DIM * X_DIM * sizeof(bool);
hipMalloc(&all_dev, size);
hipMemset(all_dev, 0, size);
hipMalloc(&triangles_dev, num_triangles * sizeof(triangle));
hipMemcpy(triangles_dev, triangles.data(), num_triangles * sizeof(triangle), hipMemcpyHostToDevice);
hipMalloc(&points_dev, num_triangles * sizeof(triangle));
size_t temp_offset = 0;
for (int i = 0; i < 9; i++) {
hipMemcpy(points_dev + temp_offset, point_array[i].data(),
num_triangles * sizeof(double), hipMemcpyHostToDevice);
temp_offset += num_triangles;
}
    hipError_t err = hipGetLastError(); // check for errors from the allocations/copies above
if (err != hipSuccess) {
std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl;
return 1;
}
timer_checkpoint(start);
std::cout << "Running 1st kernel... ";
for (unsigned layer_idx = 0; layer_idx < NUM_LAYERS; layer_idx += BLOCK_HEIGHT) {
hipLaunchKernelGGL(( rectTriIntersection), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, points_dev, num_triangles, all_dev, layer_idx);
hipDeviceSynchronize();
checkCudaError();
size_t blocksPerGrid = (X_DIM * BLOCK_HEIGHT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( layerExtraction), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, all_dev);
hipDeviceSynchronize();
checkCudaError();
size_t copy_size = (layer_idx + BLOCK_HEIGHT) < NUM_LAYERS ? BLOCK_HEIGHT : NUM_LAYERS - layer_idx;
copy_size = copy_size * X_DIM * Y_DIM * sizeof(bool);
#ifdef TEST
bool* host_addr = &all[X_DIM*Y_DIM*layer_idx];
#else
bool* host_addr = &all[0];
#endif
hipMemcpy(host_addr, all_dev, copy_size, hipMemcpyDeviceToHost);
hipMemset(all_dev, 0, size);
hipDeviceSynchronize();
checkCudaError();
}
timer_checkpoint(start);
hipFree(all_dev);
hipFree(points_dev);
#ifdef TEST
checkOutput(triangles_dev, num_triangles, all);
// for (int z = 0; z < NUM_LAYERS; z++) {
// for (int y = Y_DIM; y > 0; y--) {
// for (int x = 0; x < X_DIM; x++) {
// if (all[z*X_DIM*Y_DIM + y*X_DIM + x]) std::cout << "XX";
// else std::cout << " ";
// }
// std::cout << std::endl;
// }
// std::cout << std::endl << std::endl;
// }
#endif
hipFree(triangles_dev);
free(all);
return 0;
}
|
cccf980c77103cf64f7f114377fb6a5f662f62d4.cu
|
#include <iostream>
#include <string>
#include "triangle.cuh"
#include "slicer.cuh"
#include "golden.cuh"
#include <vector>
#include <chrono>
#define NOW (std::chrono::high_resolution_clock::now())
typedef std::chrono::time_point<std::chrono::high_resolution_clock> chrono_t;
void timer_checkpoint(chrono_t & checkpoint) {
#ifdef TEST
chrono_t end = NOW;
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - checkpoint);
std::cout << duration.count() << "ms" << std::endl;
checkpoint = end;
#else
std::cout << std::endl;
#endif
}
void checkCudaError() {
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
}
int main(int argc, char* argv[]) {
std::string stl_file_name;
std::vector<triangle> triangles;
std::vector<std::vector<double>> point_array(9);
if (argc == 2) {
stl_file_name = argv[1];
} else if (argc > 2) {
std::cout << "ERROR: Too many command line arguments" << std::endl;
}
chrono_t start = NOW;
load_point_array(stl_file_name, point_array, triangles);
std::cout << "Reading STL file... ";
timer_checkpoint(start);
std::cout << "Allocating device memory... ";
int num_triangles = triangles.size();
triangle* triangles_dev;
double* points_dev;
// all[z][y][x]
#ifdef TEST
bool* all = (bool*)malloc(NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool));
#else
bool* all = (bool*)malloc(BLOCK_HEIGHT * Y_DIM * X_DIM * sizeof(bool));
#endif
bool* all_dev;
size_t size = BLOCK_HEIGHT * Y_DIM * X_DIM * sizeof(bool);
cudaMalloc(&all_dev, size);
cudaMemset(all_dev, 0, size);
cudaMalloc(&triangles_dev, num_triangles * sizeof(triangle));
cudaMemcpy(triangles_dev, triangles.data(), num_triangles * sizeof(triangle), cudaMemcpyHostToDevice);
cudaMalloc(&points_dev, num_triangles * sizeof(triangle));
size_t temp_offset = 0;
for (int i = 0; i < 9; i++) {
cudaMemcpy(points_dev + temp_offset, point_array[i].data(),
num_triangles * sizeof(double), cudaMemcpyHostToDevice);
temp_offset += num_triangles;
}
    cudaError_t err = cudaGetLastError(); // check for errors from the allocations/copies above
if (err != cudaSuccess) {
std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;
return 1;
}
timer_checkpoint(start);
std::cout << "Running 1st kernel... ";
for (unsigned layer_idx = 0; layer_idx < NUM_LAYERS; layer_idx += BLOCK_HEIGHT) {
rectTriIntersection<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(points_dev, num_triangles, all_dev, layer_idx);
cudaDeviceSynchronize();
checkCudaError();
size_t blocksPerGrid = (X_DIM * BLOCK_HEIGHT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
layerExtraction<<<blocksPerGrid, THREADS_PER_BLOCK>>>(all_dev);
cudaDeviceSynchronize();
checkCudaError();
size_t copy_size = (layer_idx + BLOCK_HEIGHT) < NUM_LAYERS ? BLOCK_HEIGHT : NUM_LAYERS - layer_idx;
copy_size = copy_size * X_DIM * Y_DIM * sizeof(bool);
#ifdef TEST
bool* host_addr = &all[X_DIM*Y_DIM*layer_idx];
#else
bool* host_addr = &all[0];
#endif
cudaMemcpy(host_addr, all_dev, copy_size, cudaMemcpyDeviceToHost);
cudaMemset(all_dev, 0, size);
cudaDeviceSynchronize();
checkCudaError();
}
timer_checkpoint(start);
cudaFree(all_dev);
cudaFree(points_dev);
#ifdef TEST
checkOutput(triangles_dev, num_triangles, all);
// for (int z = 0; z < NUM_LAYERS; z++) {
// for (int y = Y_DIM; y > 0; y--) {
// for (int x = 0; x < X_DIM; x++) {
// if (all[z*X_DIM*Y_DIM + y*X_DIM + x]) std::cout << "XX";
// else std::cout << " ";
// }
// std::cout << std::endl;
// }
// std::cout << std::endl << std::endl;
// }
#endif
cudaFree(triangles_dev);
free(all);
return 0;
}
|
f42ade94a43399713cce708675322e430e7d0928.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Thread2D.h"
#include "cudas.h"
#include "DamierHueFloatMath.h"
#include "Indices_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damierHueFloat(float* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damierHueFloat(float* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t)
{
DamierHueFloatMath damierHueFloatMath(n,t);
const int TID = Thread2D::tid();
const int NB_THREAD = Thread2D::nbThread();
const int WH = w * h;
double x;
double y;
int i; // in [0,h[
int j; // in [0,w[
int s = TID;
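	// grid-stride loop: each thread covers pixels s, s + NB_THREAD, s + 2*NB_THREAD, ...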
while (s < WH)
{
Indices::toIJ(s, w, &i, &j); // update (i, j)
		// (i,j) screen domain
		// (x,y) math domain
domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y)
damierHueFloatMath.colorXY(&tabPixelsGM[s], x, y); // update ptrDevPixels[s]
s += NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
f42ade94a43399713cce708675322e430e7d0928.cu
|
#include "Thread2D.h"
#include "cudas.h"
#include "DamierHueFloatMath.h"
#include "Indices_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damierHueFloat(float* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damierHueFloat(float* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t)
{
DamierHueFloatMath damierHueFloatMath(n,t);
const int TID = Thread2D::tid();
const int NB_THREAD = Thread2D::nbThread();
const int WH = w * h;
double x;
double y;
int i; // in [0,h[
int j; // in [0,w[
int s = TID;
while (s < WH)
{
Indices::toIJ(s, w, &i, &j); // update (i, j)
		// (i,j) screen domain
		// (x,y) math domain
domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y)
damierHueFloatMath.colorXY(&tabPixelsGM[s], x, y); // update ptrDevPixels[s]
s += NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
3f0f7abf22eebf962ae7b5e3695c085960b4dfcf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "custom_cuda_layers.h"
namespace cg = cooperative_groups;
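// Simulated (quantize-dequantize) kernels: values in each group are scaled to num_bits
// integer levels and written back in place, optionally with stochastic rounding.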
__global__ void qunatize_kernel(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
float2 data[MAX_REG];
int group_id = blockIdx.x;
{
int group_index = id;
int reg_count = 0;
int offset = group_id * group_size;
float max = -10000.0;
while (group_index < group_size && reg_count < MAX_REG) {
data[reg_count] = vals_cast[offset + group_index];
__half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
if (abs((float)data_h[0]) > max) max = abs((float)data_h[0]);
if (abs((float)data_h[1]) > max) max = abs((float)data_h[1]);
if (abs((float)data_h[2]) > max) max = abs((float)data_h[2]);
if (abs((float)data_h[3]) > max) max = abs((float)data_h[3]);
group_index += blockDim.x;
reg_count++;
}
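        // reduce the per-thread max across the warp with XOR shuffles,
        // then across the block through shared memory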
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
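        // symmetric scale: map [-max, max] onto 2^num_bits integer levels,
        // then dequantize with the inverse scale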
float q_scale = (1 << num_bits) / (2 * max + 1e-5);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
__half2* data_h = reinterpret_cast<__half2*>(&data[i]);
float2 q_data[2];
q_data[0] = __half22float2(data_h[0]);
q_data[1] = __half22float2(data_h[1]);
float2 q_data_int[2];
q_data_int[0].x = roundf(q_data[0].x * q_scale);
q_data_int[0].y = roundf(q_data[0].y * q_scale);
q_data_int[1].x = roundf(q_data[1].x * q_scale);
q_data_int[1].y = roundf(q_data[1].y * q_scale);
q_data_int[0].x *= q_scale_inv;
q_data_int[0].y *= q_scale_inv;
q_data_int[1].x *= q_scale_inv;
q_data_int[1].y *= q_scale_inv;
data_h[0] = __float22half2_rn(q_data_int[0]);
data_h[1] = __float22half2_rn(q_data_int[1]);
vals_cast[offset + group_index] = data[i];
}
}
}
#endif
}
__global__ void qunatize_kernel(float* vals, int group_size, int num_bits)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[MAX_REG];
int bid = blockIdx.x;
int group_index = bid * group_size + id;
int reg_count = 0;
float max = -10000.0;
while (id < group_size && reg_count < MAX_REG) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (abs(data_reg.x) > max) max = abs(data_reg.x);
if (abs(data_reg.y) > max) max = abs(data_reg.y);
if (abs(data_reg.z) > max) max = abs(data_reg.z);
if (abs(data_reg.w) > max) max = abs(data_reg.w);
group_index += blockDim.x;
id += blockDim.x;
reg_count++;
}
id = threadIdx.x;
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
b.sync();
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale = (1 << num_bits) / (2 * max + 1e-5);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
float4 q_data;
q_data = data[i];
float4 q_data_int;
q_data_int.x = roundf(q_data.x * q_scale);
q_data_int.y = roundf(q_data.y * q_scale);
q_data_int.w = roundf(q_data.w * q_scale);
q_data_int.z = roundf(q_data.z * q_scale);
q_data.x = q_data_int.x * q_scale_inv;
q_data.y = q_data_int.y * q_scale_inv;
q_data.w = q_data_int.w * q_scale_inv;
q_data.z = q_data_int.z * q_scale_inv;
vals_cast[group_index + bid * group_size] = q_data;
}
}
}
template <typename T>
void launch_qunatize_kernel(T* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream)
{
dim3 grid_dim(group_num);
dim3 block_dim(1024);
hipLaunchKernelGGL(( qunatize_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, (total_count / group_num) / 4, num_bits);
}
template void launch_qunatize_kernel(float* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
template void launch_qunatize_kernel(__half* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
__global__ void sr_qunatize_kernel(__half* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
__half2 data_low[128];
__half2 data_high[128];
int bid = blockIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed.first, idx, seed.second, &state);
unsigned int tid = threadIdx.x;
int reg_count = 0;
int offset = bid * token_size;
int group_index = bid * token_size + tid;
int total_count = token_size * token_num;
if (group_index < total_count) {
// float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float2 data = vals_cast[offset + tid];
__half2* data_h = reinterpret_cast<__half2*>(&data);
data_low[reg_count] = data_h[0];
data_high[reg_count] = data_h[1];
float2 data_f[2];
data_f[0] = __half22float2(data_h[0]);
data_f[1] = __half22float2(data_h[1]);
if (abs((float)data_f[0].x) > max) max = abs((float)data_f[0].x);
if (abs((float)data_f[0].y) > max) max = abs((float)data_f[0].y);
if (abs((float)data_f[1].x) > max) max = abs((float)data_f[1].x);
if (abs((float)data_f[1].y) > max) max = abs((float)data_f[1].y);
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
float high_q = (float)((1 << (num_bits - 1)) - 1);
float low_q = (float)(-((1 << (num_bits - 1))));
for (int i = 0; i < reg_count; i++) {
int token_index = i * blockDim.x + threadIdx.x;
if (token_index < token_size) {
float2 data_f[2];
data_f[0] = __half22float2(data_low[i]);
data_f[1] = __half22float2(data_high[i]);
float2 q_data_int[2];
q_data_int[0].x = (float)((int)(data_f[0].x * q_scale_val));
q_data_int[0].y = (float)((int)(data_f[0].y * q_scale_val));
q_data_int[1].x = (float)((int)(data_f[1].x * q_scale_val));
q_data_int[1].y = (float)((int)(data_f[1].y * q_scale_val));
// Stochastic rounding
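                // truncation above drops a fraction of a quantization step; with probability
                // equal to that fraction, nudge the value one level away from zero
                // (only while it stays strictly inside the representable range)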
float4 rand = hiprand_uniform4(&state);
float q_error[4];
q_error[0] = abs(data_f[0].x - (q_data_int[0].x / q_scale_val)) * q_scale_val;
q_error[1] = abs(data_f[0].y - (q_data_int[0].y / q_scale_val)) * q_scale_val;
q_error[2] = abs(data_f[1].x - (q_data_int[1].x / q_scale_val)) * q_scale_val;
q_error[3] = abs(data_f[1].y - (q_data_int[1].y / q_scale_val)) * q_scale_val;
q_data_int[0].x =
(rand.x < q_error[0] && q_data_int[0].x > low_q && q_data_int[0].x < high_q)
? (q_data_int[0].x + (data_f[0].x > 0 ? 1 : -1))
: q_data_int[0].x;
q_data_int[0].y =
(rand.y < q_error[1] && q_data_int[0].y > low_q && q_data_int[0].y < high_q)
? (q_data_int[0].y + (data_f[0].y > 0 ? 1 : -1))
: q_data_int[0].y;
q_data_int[1].x =
(rand.w < q_error[2] && q_data_int[1].x > low_q && q_data_int[1].x < high_q)
? (q_data_int[1].x + (data_f[1].x > 0 ? 1 : -1))
: q_data_int[1].x;
q_data_int[1].y =
(rand.z < q_error[3] && q_data_int[1].y > low_q && q_data_int[1].y < high_q)
? (q_data_int[1].y + (data_f[1].y > 0 ? 1 : -1))
: q_data_int[1].y;
data_f[0].x = q_data_int[0].x / q_scale_val;
data_f[0].y = q_data_int[0].y / q_scale_val;
data_f[1].x = q_data_int[1].x / q_scale_val;
data_f[1].y = q_data_int[1].y / q_scale_val;
float2 result;
__half2* result_h = reinterpret_cast<__half2*>(&result);
result_h[0] = __float22half2_rn(data_f[0]);
result_h[1] = __float22half2_rn(data_f[1]);
vals_cast[offset + token_index] = result;
}
}
}
#endif
}
__global__ void sr_qunatize_kernel(float* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
int idx = blockIdx.x * blockDim.x + id;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[128];
int bid = blockIdx.x;
int tid = threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed.first, idx, seed.second, &state);
int group_index = bid * token_size + threadIdx.x;
int reg_count = 0;
int total_count = token_size * token_num;
if (group_index < total_count) {
// float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
data[reg_count] = vals_cast[group_index];
if (abs(data[reg_count].x) > max) max = abs(data[reg_count].x);
if (abs(data[reg_count].y) > max) max = abs(data[reg_count].y);
if (abs(data[reg_count].z) > max) max = abs(data[reg_count].z);
if (abs(data[reg_count].w) > max) max = abs(data[reg_count].w);
group_index += blockDim.x;
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
float high_q = (float)((1 << (num_bits - 1)) - 1);
float low_q = (float)(-((1 << (num_bits - 1))));
int offset = (bid)*token_size;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + threadIdx.x;
if (group_index < token_size) {
float4 q_data = data[i];
float4 q_data_int;
q_data_int.x = (float)((int)(q_data.x * q_scale_val));
q_data_int.y = (float)((int)(q_data.y * q_scale_val));
q_data_int.w = (float)((int)(q_data.w * q_scale_val));
q_data_int.z = (float)((int)(q_data.z * q_scale_val));
// Stochastic rounding
float4 rand = hiprand_uniform4(&state);
float q_error[4];
q_error[0] = abs(q_data.x - (q_data_int.x / q_scale_val)) * q_scale_val;
q_error[1] = abs(q_data.y - (q_data_int.y / q_scale_val)) * q_scale_val;
q_error[2] = abs(q_data.w - (q_data_int.w / q_scale_val)) * q_scale_val;
q_error[3] = abs(q_data.z - (q_data_int.z / q_scale_val)) * q_scale_val;
q_data_int.x =
(rand.x < q_error[0] && q_data_int.x > low_q && q_data_int.x < high_q)
? (q_data_int.x + (q_data.x > 0 ? 1 : -1))
: q_data_int.x;
q_data_int.y =
(rand.y < q_error[1] && q_data_int.y > low_q && q_data_int.y < high_q)
? (q_data_int.y + (q_data.y > 0 ? 1 : -1))
: q_data_int.y;
q_data_int.w =
(rand.w < q_error[2] && q_data_int.w > low_q && q_data_int.w < high_q)
? (q_data_int.w + (q_data.w > 0 ? 1 : -1))
: q_data_int.w;
q_data_int.z =
(rand.z < q_error[3] && q_data_int.z > low_q && q_data_int.z < high_q)
? (q_data_int.z + (q_data.z > 0 ? 1 : -1))
: q_data_int.z;
q_data_int.x /= q_scale_val;
q_data_int.y /= q_scale_val;
q_data_int.w /= q_scale_val;
q_data_int.z /= q_scale_val;
vals_cast[group_index + offset] = q_data_int;
}
}
}
}
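// Host-side launcher: one 1024-thread block per group/token. The element count
// passed to the kernel is (total_count / group_num) / 4 because the kernels read
// four values per vectorized load (float4 for fp32, two __half2 for fp16). The
// Philox seed offset is advanced in proportion to the per-thread workload so
// successive launches draw fresh random numbers.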
template <typename T>
void launch_sr_qunatize_kernel(T* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream)
{
dim3 block_dim(1024);
dim3 grid_dim(group_num);
uint64_t inc = total_count / grid_dim.x / block_dim.x;
std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
hipLaunchKernelGGL(( sr_qunatize_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, (total_count / group_num) / 4, group_num, num_bits, seed);
}
template void launch_sr_qunatize_kernel(float* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
template void launch_sr_qunatize_kernel(__half* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
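// Asymmetric (min/max) round-to-nearest quantizer for fp16 data. Each block
// reduces the minimum and maximum of its group, maps values onto 2^num_bits
// levels with scale (max - min) / 2^num_bits and zero point `min`, rounds to the
// nearest level with roundf, and immediately dequantizes back in place
// (fake quantization).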
__global__ void qunatize_kernel_asym(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
float2 data[MAX_REG];
int group_id = blockIdx.x;
{
int group_index = id;
int reg_count = 0;
int offset = group_id * group_size;
float max = -10000.0;
float min = 10000.0;
while (group_index < group_size && reg_count < MAX_REG) {
data[reg_count] = vals_cast[offset + group_index];
__half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
if (((float)data_h[0]) > max) max = (float)data_h[0];
if (((float)data_h[1]) > max) max = (float)data_h[1];
if (((float)data_h[2]) > max) max = (float)data_h[2];
if (((float)data_h[3]) > max) max = (float)data_h[3];
if (((float)data_h[0]) < min) min = (float)data_h[0];
if (((float)data_h[1]) < min) min = (float)data_h[1];
if (((float)data_h[2]) < min) min = (float)data_h[2];
if (((float)data_h[3]) < min) min = (float)data_h[3];
group_index += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
__half2* data_h = reinterpret_cast<__half2*>(&data[i]);
float2 q_data[2];
q_data[0] = __half22float2(data_h[0]);
q_data[1] = __half22float2(data_h[1]);
float2 q_data_int[2];
q_data_int[0].x = roundf((q_data[0].x - min) * q_scale_inv);
q_data_int[0].y = roundf((q_data[0].y - min) * q_scale_inv);
q_data_int[1].x = roundf((q_data[1].x - min) * q_scale_inv);
q_data_int[1].y = roundf((q_data[1].y - min) * q_scale_inv);
q_data_int[0].x = q_data_int[0].x * q_scale + min;
q_data_int[0].y = q_data_int[0].y * q_scale + min;
q_data_int[1].x = q_data_int[1].x * q_scale + min;
q_data_int[1].y = q_data_int[1].y * q_scale + min;
data_h[0] = __float22half2_rn(q_data_int[0]);
data_h[1] = __float22half2_rn(q_data_int[1]);
vals_cast[offset + group_index] = data[i];
}
}
}
#endif
}
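// fp32 counterpart of the asymmetric round-to-nearest quantizer above; same
// min/max reduction and fake-quantization scheme, operating on float4 vectors.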
__global__ void qunatize_kernel_asym(float* vals, int group_size, int num_bits)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[MAX_REG];
int bid = blockIdx.x;
int group_index = bid * group_size + id;
int reg_count = 0;
float max = -10000.0;
float min = 10000.0;
while (id < group_size && reg_count < MAX_REG) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (data_reg.x > max) max = data_reg.x;
if (data_reg.y > max) max = data_reg.y;
if (data_reg.w > max) max = data_reg.w;
if (data_reg.z > max) max = data_reg.z;
if (data_reg.x < min) min = data_reg.x;
if (data_reg.y < min) min = data_reg.y;
if (data_reg.w < min) min = data_reg.w;
if (data_reg.z < min) min = data_reg.z;
group_index += blockDim.x;
id += blockDim.x;
reg_count++;
}
id = threadIdx.x;
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
float4 q_data;
q_data = data[i];
float4 q_data_int;
q_data_int.x = roundf((q_data.x - min) * q_scale_inv);
q_data_int.y = roundf((q_data.y - min) * q_scale_inv);
q_data_int.w = roundf((q_data.w - min) * q_scale_inv);
q_data_int.z = roundf((q_data.z - min) * q_scale_inv);
q_data.x = q_data_int.x * q_scale + min;
q_data.y = q_data_int.y * q_scale + min;
q_data.w = q_data_int.w * q_scale + min;
q_data.z = q_data_int.z * q_scale + min;
vals_cast[group_index + bid * group_size] = q_data;
}
}
}
template <typename T>
void launch_qunatize_kernel_asym(T* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream)
{
dim3 grid_dim(group_num);
dim3 block_dim(1024);
hipLaunchKernelGGL(( qunatize_kernel_asym), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, (total_count / group_num) / 4, num_bits);
}
template void launch_qunatize_kernel_asym(float* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
template void launch_qunatize_kernel_asym(__half* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
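// Asymmetric quantizer with stochastic rounding for fp16 data: values are first
// truncated onto the [min, max] grid, then rounded up to the next level with
// probability equal to the normalized truncation error (capped at the top level
// 2^num_bits - 1), and finally dequantized in place.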
__global__ void sr_qunatize_kernel_asym(__half* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
__half2 data_low[128];
__half2 data_high[128];
int bid = blockIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed.first, idx, seed.second, &state);
unsigned int tid = threadIdx.x;
int reg_count = 0;
int offset = bid * token_size;
int group_index = bid * token_size + tid;
int total_count = token_size * token_num;
if (group_index < total_count) {
float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float2 data = vals_cast[offset + tid];
__half2* data_h = reinterpret_cast<__half2*>(&data);
data_low[reg_count] = data_h[0];
data_high[reg_count] = data_h[1];
float2 data_f[2];
data_f[0] = __half22float2(data_h[0]);
data_f[1] = __half22float2(data_h[1]);
if (((float)data_f[0].x) > max) max = (float)data_f[0].x;
if (((float)data_f[0].y) > max) max = (float)data_f[0].y;
if (((float)data_f[1].x) > max) max = (float)data_f[1].x;
if (((float)data_f[1].y) > max) max = (float)data_f[1].y;
if (((float)data_f[0].x) < min) min = (float)data_f[0].x;
if (((float)data_f[0].y) < min) min = (float)data_f[0].y;
if (((float)data_f[1].x) < min) min = (float)data_f[1].x;
if (((float)data_f[1].y) < min) min = (float)data_f[1].y;
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_val_inv = 1 / q_scale_val;
float high_q = (float)((1 << num_bits) - 1);
for (int i = 0; i < reg_count; i++) {
int token_index = i * blockDim.x + threadIdx.x;
if (token_index < token_size) {
float2 data_f[2];
data_f[0] = __half22float2(data_low[i]);
data_f[1] = __half22float2(data_high[i]);
float2 q_data_int[2];
q_data_int[0].x = (float)((unsigned int)((data_f[0].x - min) * q_scale_val_inv));
q_data_int[0].y = (float)((unsigned int)((data_f[0].y - min) * q_scale_val_inv));
q_data_int[1].x = (float)((unsigned int)((data_f[1].x - min) * q_scale_val_inv));
q_data_int[1].y = (float)((unsigned int)((data_f[1].y - min) * q_scale_val_inv));
// Stochastic rounding
float4 rand = hiprand_uniform4(&state);
float q_error[4];
q_error[0] =
abs(data_f[0].x - ((q_data_int[0].x * q_scale_val) + min)) * q_scale_val_inv;
q_error[1] =
abs(data_f[0].y - ((q_data_int[0].y * q_scale_val) + min)) * q_scale_val_inv;
q_error[2] =
abs(data_f[1].x - ((q_data_int[1].x * q_scale_val) + min)) * q_scale_val_inv;
q_error[3] =
abs(data_f[1].y - ((q_data_int[1].y * q_scale_val) + min)) * q_scale_val_inv;
q_data_int[0].x = (rand.x < q_error[0] && q_data_int[0].x < high_q)
? (q_data_int[0].x + 1)
: q_data_int[0].x;
q_data_int[0].y = (rand.y < q_error[1] && q_data_int[0].y < high_q)
? (q_data_int[0].y + 1)
: q_data_int[0].y;
q_data_int[1].x = (rand.w < q_error[2] && q_data_int[1].x < high_q)
? (q_data_int[1].x + 1)
: q_data_int[1].x;
q_data_int[1].y = (rand.z < q_error[3] && q_data_int[1].y < high_q)
? (q_data_int[1].y + 1)
: q_data_int[1].y;
data_f[0].x = q_data_int[0].x * q_scale_val + min;
data_f[0].y = q_data_int[0].y * q_scale_val + min;
data_f[1].x = q_data_int[1].x * q_scale_val + min;
data_f[1].y = q_data_int[1].y * q_scale_val + min;
float2 result;
__half2* result_h = reinterpret_cast<__half2*>(&result);
result_h[0] = __float22half2_rn(data_f[0]);
result_h[1] = __float22half2_rn(data_f[1]);
vals_cast[offset + token_index] = result;
}
}
}
#endif
}
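// fp32 counterpart of the asymmetric stochastic-rounding quantizer above,
// operating on float4 vectors.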
__global__ void sr_qunatize_kernel_asym(float* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
int idx = blockIdx.x * blockDim.x + id;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[128];
int bid = blockIdx.x;
int tid = threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed.first, idx, seed.second, &state);
int group_index = bid * token_size + threadIdx.x;
int reg_count = 0;
int total_count = token_size * token_num;
if (group_index < total_count) {
float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (data_reg.x > max) max = data_reg.x;
if (data_reg.y > max) max = data_reg.y;
if (data_reg.w > max) max = data_reg.w;
if (data_reg.z > max) max = data_reg.z;
if (data_reg.x < min) min = data_reg.x;
if (data_reg.y < min) min = data_reg.y;
if (data_reg.w < min) min = data_reg.w;
if (data_reg.z < min) min = data_reg.z;
group_index += blockDim.x;
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
float high_q = (float)((1 << num_bits) - 1);
int offset = (bid)*token_size;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + threadIdx.x;
if (group_index < token_size) {
float4 q_data = data[i];
float4 q_data_int;
q_data_int.x = (float)((int)((q_data.x - min) / q_scale_val));
q_data_int.y = (float)((int)((q_data.y - min) / q_scale_val));
q_data_int.w = (float)((int)((q_data.w - min) / q_scale_val));
q_data_int.z = (float)((int)((q_data.z - min) / q_scale_val));
// Stochastic rounding
float4 rand = hiprand_uniform4(&state);
float q_error[4];
q_error[0] = abs(q_data.x - ((q_data_int.x * q_scale_val) + min)) / q_scale_val;
q_error[1] = abs(q_data.y - ((q_data_int.y * q_scale_val) + min)) / q_scale_val;
q_error[2] = abs(q_data.w - ((q_data_int.w * q_scale_val) + min)) / q_scale_val;
q_error[3] = abs(q_data.z - ((q_data_int.z * q_scale_val) + min)) / q_scale_val;
q_data_int.x = (rand.x < q_error[0] && q_data_int.x < high_q) ? (q_data_int.x + 1)
: q_data_int.x;
q_data_int.y = (rand.y < q_error[1] && q_data_int.y < high_q) ? (q_data_int.y + 1)
: q_data_int.y;
q_data_int.w = (rand.w < q_error[2] && q_data_int.w < high_q) ? (q_data_int.w + 1)
: q_data_int.w;
q_data_int.z = (rand.z < q_error[3] && q_data_int.z < high_q) ? (q_data_int.z + 1)
: q_data_int.z;
q_data_int.x = q_data_int.x * q_scale_val + min;
q_data_int.y = q_data_int.y * q_scale_val + min;
q_data_int.w = q_data_int.w * q_scale_val + min;
q_data_int.z = q_data_int.z * q_scale_val + min;
vals_cast[group_index + offset] = q_data_int;
}
}
}
}
template <typename T>
void launch_sr_qunatize_kernel_asym(T* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream)
{
dim3 block_dim(1024);
dim3 grid_dim(group_num);
uint64_t inc = total_count / grid_dim.x / block_dim.x;
std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
hipLaunchKernelGGL(( sr_qunatize_kernel_asym), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, (total_count / group_num) / 4, group_num, num_bits, seed);
}
template void launch_sr_qunatize_kernel_asym(float* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
template void launch_sr_qunatize_kernel_asym(__half* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
|
3f0f7abf22eebf962ae7b5e3695c085960b4dfcf.cu
|
#include <math.h>
#include "custom_cuda_layers.h"
namespace cg = cooperative_groups;
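// Symmetric round-to-nearest quantizer for fp16 data: each block reduces the
// absolute maximum of its group, scales values by 2^num_bits / (2*max), rounds
// with roundf, and writes the dequantized result back in place.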
__global__ void qunatize_kernel(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
float2 data[MAX_REG];
int group_id = blockIdx.x;
{
int group_index = id;
int reg_count = 0;
int offset = group_id * group_size;
float max = -10000.0;
while (group_index < group_size && reg_count < MAX_REG) {
data[reg_count] = vals_cast[offset + group_index];
__half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
if (abs((float)data_h[0]) > max) max = abs((float)data_h[0]);
if (abs((float)data_h[1]) > max) max = abs((float)data_h[1]);
if (abs((float)data_h[2]) > max) max = abs((float)data_h[2]);
if (abs((float)data_h[3]) > max) max = abs((float)data_h[3]);
group_index += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale = (1 << num_bits) / (2 * max + 1e-5);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
__half2* data_h = reinterpret_cast<__half2*>(&data[i]);
float2 q_data[2];
q_data[0] = __half22float2(data_h[0]);
q_data[1] = __half22float2(data_h[1]);
float2 q_data_int[2];
q_data_int[0].x = roundf(q_data[0].x * q_scale);
q_data_int[0].y = roundf(q_data[0].y * q_scale);
q_data_int[1].x = roundf(q_data[1].x * q_scale);
q_data_int[1].y = roundf(q_data[1].y * q_scale);
q_data_int[0].x *= q_scale_inv;
q_data_int[0].y *= q_scale_inv;
q_data_int[1].x *= q_scale_inv;
q_data_int[1].y *= q_scale_inv;
data_h[0] = __float22half2_rn(q_data_int[0]);
data_h[1] = __float22half2_rn(q_data_int[1]);
vals_cast[offset + group_index] = data[i];
}
}
}
#endif
}
__global__ void qunatize_kernel(float* vals, int group_size, int num_bits)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[MAX_REG];
int bid = blockIdx.x;
int group_index = bid * group_size + id;
int reg_count = 0;
float max = -10000.0;
while (id < group_size && reg_count < MAX_REG) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (abs(data_reg.x) > max) max = abs(data_reg.x);
if (abs(data_reg.y) > max) max = abs(data_reg.y);
if (abs(data_reg.z) > max) max = abs(data_reg.z);
if (abs(data_reg.w) > max) max = abs(data_reg.w);
group_index += blockDim.x;
id += blockDim.x;
reg_count++;
}
id = threadIdx.x;
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
b.sync();
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale = (1 << num_bits) / (2 * max + 1e-5);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
float4 q_data;
q_data = data[i];
float4 q_data_int;
q_data_int.x = roundf(q_data.x * q_scale);
q_data_int.y = roundf(q_data.y * q_scale);
q_data_int.w = roundf(q_data.w * q_scale);
q_data_int.z = roundf(q_data.z * q_scale);
q_data.x = q_data_int.x * q_scale_inv;
q_data.y = q_data_int.y * q_scale_inv;
q_data.w = q_data_int.w * q_scale_inv;
q_data.z = q_data_int.z * q_scale_inv;
vals_cast[group_index + bid * group_size] = q_data;
}
}
}
template <typename T>
void launch_qunatize_kernel(T* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream)
{
dim3 grid_dim(group_num);
dim3 block_dim(1024);
qunatize_kernel<<<grid_dim, block_dim, 0, stream>>>(
vals, (total_count / group_num) / 4, num_bits);
}
template void launch_qunatize_kernel(float* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
template void launch_qunatize_kernel(__half* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
__global__ void sr_qunatize_kernel(__half* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
__half2 data_low[128];
__half2 data_high[128];
int bid = blockIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed.first, idx, seed.second, &state);
unsigned int tid = threadIdx.x;
int reg_count = 0;
int offset = bid * token_size;
int group_index = bid * token_size + tid;
int total_count = token_size * token_num;
if (group_index < total_count) {
// float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float2 data = vals_cast[offset + tid];
__half2* data_h = reinterpret_cast<__half2*>(&data);
data_low[reg_count] = data_h[0];
data_high[reg_count] = data_h[1];
float2 data_f[2];
data_f[0] = __half22float2(data_h[0]);
data_f[1] = __half22float2(data_h[1]);
if (abs((float)data_f[0].x) > max) max = abs((float)data_f[0].x);
if (abs((float)data_f[0].y) > max) max = abs((float)data_f[0].y);
if (abs((float)data_f[1].x) > max) max = abs((float)data_f[1].x);
if (abs((float)data_f[1].y) > max) max = abs((float)data_f[1].y);
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
float high_q = (float)((1 << (num_bits - 1)) - 1);
float low_q = (float)(-((1 << (num_bits - 1))));
for (int i = 0; i < reg_count; i++) {
int token_index = i * blockDim.x + threadIdx.x;
if (token_index < token_size) {
float2 data_f[2];
data_f[0] = __half22float2(data_low[i]);
data_f[1] = __half22float2(data_high[i]);
float2 q_data_int[2];
q_data_int[0].x = (float)((int)(data_f[0].x * q_scale_val));
q_data_int[0].y = (float)((int)(data_f[0].y * q_scale_val));
q_data_int[1].x = (float)((int)(data_f[1].x * q_scale_val));
q_data_int[1].y = (float)((int)(data_f[1].y * q_scale_val));
// Stochastic rounding
float4 rand = curand_uniform4(&state);
float q_error[4];
q_error[0] = abs(data_f[0].x - (q_data_int[0].x / q_scale_val)) * q_scale_val;
q_error[1] = abs(data_f[0].y - (q_data_int[0].y / q_scale_val)) * q_scale_val;
q_error[2] = abs(data_f[1].x - (q_data_int[1].x / q_scale_val)) * q_scale_val;
q_error[3] = abs(data_f[1].y - (q_data_int[1].y / q_scale_val)) * q_scale_val;
q_data_int[0].x =
(rand.x < q_error[0] && q_data_int[0].x > low_q && q_data_int[0].x < high_q)
? (q_data_int[0].x + (data_f[0].x > 0 ? 1 : -1))
: q_data_int[0].x;
q_data_int[0].y =
(rand.y < q_error[1] && q_data_int[0].y > low_q && q_data_int[0].y < high_q)
? (q_data_int[0].y + (data_f[0].y > 0 ? 1 : -1))
: q_data_int[0].y;
q_data_int[1].x =
(rand.w < q_error[2] && q_data_int[1].x > low_q && q_data_int[1].x < high_q)
? (q_data_int[1].x + (data_f[1].x > 0 ? 1 : -1))
: q_data_int[1].x;
q_data_int[1].y =
(rand.z < q_error[3] && q_data_int[1].y > low_q && q_data_int[1].y < high_q)
? (q_data_int[1].y + (data_f[1].y > 0 ? 1 : -1))
: q_data_int[1].y;
data_f[0].x = q_data_int[0].x / q_scale_val;
data_f[0].y = q_data_int[0].y / q_scale_val;
data_f[1].x = q_data_int[1].x / q_scale_val;
data_f[1].y = q_data_int[1].y / q_scale_val;
float2 result;
__half2* result_h = reinterpret_cast<__half2*>(&result);
result_h[0] = __float22half2_rn(data_f[0]);
result_h[1] = __float22half2_rn(data_f[1]);
vals_cast[offset + token_index] = result;
}
}
}
#endif
}
__global__ void sr_qunatize_kernel(float* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
int idx = blockIdx.x * blockDim.x + id;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[128];
int bid = blockIdx.x;
int tid = threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed.first, idx, seed.second, &state);
int group_index = bid * token_size + threadIdx.x;
int reg_count = 0;
int total_count = token_size * token_num;
if (group_index < total_count) {
// float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
data[reg_count] = vals_cast[group_index];
if (abs(data[reg_count].x) > max) max = abs(data[reg_count].x);
if (abs(data[reg_count].y) > max) max = abs(data[reg_count].y);
if (abs(data[reg_count].z) > max) max = abs(data[reg_count].z);
if (abs(data[reg_count].w) > max) max = abs(data[reg_count].w);
group_index += blockDim.x;
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
float high_q = (float)((1 << (num_bits - 1)) - 1);
float low_q = (float)(-((1 << (num_bits - 1))));
int offset = (bid)*token_size;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + threadIdx.x;
if (group_index < token_size) {
float4 q_data = data[i];
float4 q_data_int;
q_data_int.x = (float)((int)(q_data.x * q_scale_val));
q_data_int.y = (float)((int)(q_data.y * q_scale_val));
q_data_int.w = (float)((int)(q_data.w * q_scale_val));
q_data_int.z = (float)((int)(q_data.z * q_scale_val));
// Stochastic rounding
float4 rand = curand_uniform4(&state);
float q_error[4];
q_error[0] = abs(q_data.x - (q_data_int.x / q_scale_val)) * q_scale_val;
q_error[1] = abs(q_data.y - (q_data_int.y / q_scale_val)) * q_scale_val;
q_error[2] = abs(q_data.w - (q_data_int.w / q_scale_val)) * q_scale_val;
q_error[3] = abs(q_data.z - (q_data_int.z / q_scale_val)) * q_scale_val;
q_data_int.x =
(rand.x < q_error[0] && q_data_int.x > low_q && q_data_int.x < high_q)
? (q_data_int.x + (q_data.x > 0 ? 1 : -1))
: q_data_int.x;
q_data_int.y =
(rand.y < q_error[1] && q_data_int.y > low_q && q_data_int.y < high_q)
? (q_data_int.y + (q_data.y > 0 ? 1 : -1))
: q_data_int.y;
q_data_int.w =
(rand.w < q_error[2] && q_data_int.w > low_q && q_data_int.w < high_q)
? (q_data_int.w + (q_data.w > 0 ? 1 : -1))
: q_data_int.w;
q_data_int.z =
(rand.z < q_error[3] && q_data_int.z > low_q && q_data_int.z < high_q)
? (q_data_int.z + (q_data.z > 0 ? 1 : -1))
: q_data_int.z;
q_data_int.x /= q_scale_val;
q_data_int.y /= q_scale_val;
q_data_int.w /= q_scale_val;
q_data_int.z /= q_scale_val;
vals_cast[group_index + offset] = q_data_int;
}
}
}
}
template <typename T>
void launch_sr_qunatize_kernel(T* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream)
{
dim3 block_dim(1024);
dim3 grid_dim(group_num);
uint64_t inc = total_count / grid_dim.x / block_dim.x;
std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
sr_qunatize_kernel<<<grid_dim, block_dim, 0, stream>>>(
vals, (total_count / group_num) / 4, group_num, num_bits, seed);
}
template void launch_sr_qunatize_kernel(float* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
template void launch_sr_qunatize_kernel(__half* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
__global__ void qunatize_kernel_asym(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
float2 data[MAX_REG];
int group_id = blockIdx.x;
{
int group_index = id;
int reg_count = 0;
int offset = group_id * group_size;
float max = -10000.0;
float min = 10000.0;
while (group_index < group_size && reg_count < MAX_REG) {
data[reg_count] = vals_cast[offset + group_index];
__half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
if (((float)data_h[0]) > max) max = (float)data_h[0];
if (((float)data_h[1]) > max) max = (float)data_h[1];
if (((float)data_h[2]) > max) max = (float)data_h[2];
if (((float)data_h[3]) > max) max = (float)data_h[3];
if (((float)data_h[0]) < min) min = (float)data_h[0];
if (((float)data_h[1]) < min) min = (float)data_h[1];
if (((float)data_h[2]) < min) min = (float)data_h[2];
if (((float)data_h[3]) < min) min = (float)data_h[3];
group_index += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
__half2* data_h = reinterpret_cast<__half2*>(&data[i]);
float2 q_data[2];
q_data[0] = __half22float2(data_h[0]);
q_data[1] = __half22float2(data_h[1]);
float2 q_data_int[2];
q_data_int[0].x = roundf((q_data[0].x - min) * q_scale_inv);
q_data_int[0].y = roundf((q_data[0].y - min) * q_scale_inv);
q_data_int[1].x = roundf((q_data[1].x - min) * q_scale_inv);
q_data_int[1].y = roundf((q_data[1].y - min) * q_scale_inv);
q_data_int[0].x = q_data_int[0].x * q_scale + min;
q_data_int[0].y = q_data_int[0].y * q_scale + min;
q_data_int[1].x = q_data_int[1].x * q_scale + min;
q_data_int[1].y = q_data_int[1].y * q_scale + min;
data_h[0] = __float22half2_rn(q_data_int[0]);
data_h[1] = __float22half2_rn(q_data_int[1]);
vals_cast[offset + group_index] = data[i];
}
}
}
#endif
}
__global__ void qunatize_kernel_asym(float* vals, int group_size, int num_bits)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[MAX_REG];
int bid = blockIdx.x;
int group_index = bid * group_size + id;
int reg_count = 0;
float max = -10000.0;
float min = 10000.0;
while (id < group_size && reg_count < MAX_REG) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (data_reg.x > max) max = data_reg.x;
if (data_reg.y > max) max = data_reg.y;
if (data_reg.w > max) max = data_reg.w;
if (data_reg.z > max) max = data_reg.z;
if (data_reg.x < min) min = data_reg.x;
if (data_reg.y < min) min = data_reg.y;
if (data_reg.w < min) min = data_reg.w;
if (data_reg.z < min) min = data_reg.z;
group_index += blockDim.x;
id += blockDim.x;
reg_count++;
}
id = threadIdx.x;
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_inv = 1 / q_scale;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
float4 q_data;
q_data = data[i];
float4 q_data_int;
q_data_int.x = roundf((q_data.x - min) * q_scale_inv);
q_data_int.y = roundf((q_data.y - min) * q_scale_inv);
q_data_int.w = roundf((q_data.w - min) * q_scale_inv);
q_data_int.z = roundf((q_data.z - min) * q_scale_inv);
q_data.x = q_data_int.x * q_scale + min;
q_data.y = q_data_int.y * q_scale + min;
q_data.w = q_data_int.w * q_scale + min;
q_data.z = q_data_int.z * q_scale + min;
vals_cast[group_index + bid * group_size] = q_data;
}
}
}
template <typename T>
void launch_qunatize_kernel_asym(T* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream)
{
dim3 grid_dim(group_num);
dim3 block_dim(1024);
qunatize_kernel_asym<<<grid_dim, block_dim, 0, stream>>>(
vals, (total_count / group_num) / 4, num_bits);
}
template void launch_qunatize_kernel_asym(float* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
template void launch_qunatize_kernel_asym(__half* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
__global__ void sr_qunatize_kernel_asym(__half* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
__half2 data_low[128];
__half2 data_high[128];
int bid = blockIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed.first, idx, seed.second, &state);
unsigned int tid = threadIdx.x;
int reg_count = 0;
int offset = bid * token_size;
int group_index = bid * token_size + tid;
int total_count = token_size * token_num;
if (group_index < total_count) {
float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float2 data = vals_cast[offset + tid];
__half2* data_h = reinterpret_cast<__half2*>(&data);
data_low[reg_count] = data_h[0];
data_high[reg_count] = data_h[1];
float2 data_f[2];
data_f[0] = __half22float2(data_h[0]);
data_f[1] = __half22float2(data_h[1]);
if (((float)data_f[0].x) > max) max = (float)data_f[0].x;
if (((float)data_f[0].y) > max) max = (float)data_f[0].y;
if (((float)data_f[1].x) > max) max = (float)data_f[1].x;
if (((float)data_f[1].y) > max) max = (float)data_f[1].y;
if (((float)data_f[0].x) < min) min = (float)data_f[0].x;
if (((float)data_f[0].y) < min) min = (float)data_f[0].y;
if (((float)data_f[1].x) < min) min = (float)data_f[1].x;
if (((float)data_f[1].y) < min) min = (float)data_f[1].y;
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_val_inv = 1 / q_scale_val;
float high_q = (float)((1 << num_bits) - 1);
for (int i = 0; i < reg_count; i++) {
int token_index = i * blockDim.x + threadIdx.x;
if (token_index < token_size) {
float2 data_f[2];
data_f[0] = __half22float2(data_low[i]);
data_f[1] = __half22float2(data_high[i]);
float2 q_data_int[2];
q_data_int[0].x = (float)((unsigned int)((data_f[0].x - min) * q_scale_val_inv));
q_data_int[0].y = (float)((unsigned int)((data_f[0].y - min) * q_scale_val_inv));
q_data_int[1].x = (float)((unsigned int)((data_f[1].x - min) * q_scale_val_inv));
q_data_int[1].y = (float)((unsigned int)((data_f[1].y - min) * q_scale_val_inv));
// Stochastic rounding
float4 rand = curand_uniform4(&state);
float q_error[4];
q_error[0] =
abs(data_f[0].x - ((q_data_int[0].x * q_scale_val) + min)) * q_scale_val_inv;
q_error[1] =
abs(data_f[0].y - ((q_data_int[0].y * q_scale_val) + min)) * q_scale_val_inv;
q_error[2] =
abs(data_f[1].x - ((q_data_int[1].x * q_scale_val) + min)) * q_scale_val_inv;
q_error[3] =
abs(data_f[1].y - ((q_data_int[1].y * q_scale_val) + min)) * q_scale_val_inv;
q_data_int[0].x = (rand.x < q_error[0] && q_data_int[0].x < high_q)
? (q_data_int[0].x + 1)
: q_data_int[0].x;
q_data_int[0].y = (rand.y < q_error[1] && q_data_int[0].y < high_q)
? (q_data_int[0].y + 1)
: q_data_int[0].y;
q_data_int[1].x = (rand.w < q_error[2] && q_data_int[1].x < high_q)
? (q_data_int[1].x + 1)
: q_data_int[1].x;
q_data_int[1].y = (rand.z < q_error[3] && q_data_int[1].y < high_q)
? (q_data_int[1].y + 1)
: q_data_int[1].y;
data_f[0].x = q_data_int[0].x * q_scale_val + min;
data_f[0].y = q_data_int[0].y * q_scale_val + min;
data_f[1].x = q_data_int[1].x * q_scale_val + min;
data_f[1].y = q_data_int[1].y * q_scale_val + min;
float2 result;
__half2* result_h = reinterpret_cast<__half2*>(&result);
result_h[0] = __float22half2_rn(data_f[0]);
result_h[1] = __float22half2_rn(data_f[1]);
vals_cast[offset + token_index] = result;
}
}
}
#endif
}
__global__ void sr_qunatize_kernel_asym(float* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
int idx = blockIdx.x * blockDim.x + id;
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[128];
int bid = blockIdx.x;
int tid = threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed.first, idx, seed.second, &state);
int group_index = bid * token_size + threadIdx.x;
int reg_count = 0;
int total_count = token_size * token_num;
if (group_index < total_count) {
float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (data_reg.x > max) max = data_reg.x;
if (data_reg.y > max) max = data_reg.y;
if (data_reg.w > max) max = data_reg.w;
if (data_reg.z > max) max = data_reg.z;
if (data_reg.x < min) min = data_reg.x;
if (data_reg.y < min) min = data_reg.y;
if (data_reg.w < min) min = data_reg.w;
if (data_reg.z < min) min = data_reg.z;
group_index += blockDim.x;
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
float high_q = (float)((1 << num_bits) - 1);
int offset = (bid)*token_size;
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + threadIdx.x;
if (group_index < token_size) {
float4 q_data = data[i];
float4 q_data_int;
q_data_int.x = (float)((int)((q_data.x - min) / q_scale_val));
q_data_int.y = (float)((int)((q_data.y - min) / q_scale_val));
q_data_int.w = (float)((int)((q_data.w - min) / q_scale_val));
q_data_int.z = (float)((int)((q_data.z - min) / q_scale_val));
// Stochastic rounding
float4 rand = curand_uniform4(&state);
float q_error[4];
q_error[0] = abs(q_data.x - ((q_data_int.x * q_scale_val) + min)) / q_scale_val;
q_error[1] = abs(q_data.y - ((q_data_int.y * q_scale_val) + min)) / q_scale_val;
q_error[2] = abs(q_data.w - ((q_data_int.w * q_scale_val) + min)) / q_scale_val;
q_error[3] = abs(q_data.z - ((q_data_int.z * q_scale_val) + min)) / q_scale_val;
q_data_int.x = (rand.x < q_error[0] && q_data_int.x < high_q) ? (q_data_int.x + 1)
: q_data_int.x;
q_data_int.y = (rand.y < q_error[1] && q_data_int.y < high_q) ? (q_data_int.y + 1)
: q_data_int.y;
q_data_int.w = (rand.w < q_error[2] && q_data_int.w < high_q) ? (q_data_int.w + 1)
: q_data_int.w;
q_data_int.z = (rand.z < q_error[3] && q_data_int.z < high_q) ? (q_data_int.z + 1)
: q_data_int.z;
q_data_int.x = q_data_int.x * q_scale_val + min;
q_data_int.y = q_data_int.y * q_scale_val + min;
q_data_int.w = q_data_int.w * q_scale_val + min;
q_data_int.z = q_data_int.z * q_scale_val + min;
vals_cast[group_index + offset] = q_data_int;
}
}
}
}
template <typename T>
void launch_sr_qunatize_kernel_asym(T* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream)
{
dim3 block_dim(1024);
dim3 grid_dim(group_num);
uint64_t inc = total_count / grid_dim.x / block_dim.x;
std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
sr_qunatize_kernel_asym<<<grid_dim, block_dim, 0, stream>>>(
vals, (total_count / group_num) / 4, group_num, num_bits, seed);
}
template void launch_sr_qunatize_kernel_asym(float* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
template void launch_sr_qunatize_kernel_asym(__half* vals,
int total_count,
int group_num,
int num_bits,
cudaStream_t stream);
|
c4b831cbcfbcf93bb803e8c323092ac52670f2de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <random>
#include <vector>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <random>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
using namespace std;
// A small data structure to do RAII for a dataset of 2-dimensional points.
struct Data {
explicit Data(int size) : size(size), bytes(size * sizeof(float)) {
hipMalloc(&x, bytes);
hipMalloc(&y, bytes);
}
Data(int size, std::vector<float>& h_x, std::vector<float>& h_y)
: size(size), bytes(size * sizeof(float)) {
hipMalloc(&x, bytes);
hipMalloc(&y, bytes);
hipMemcpy(x, h_x.data(), bytes, hipMemcpyHostToDevice);
hipMemcpy(y, h_y.data(), bytes, hipMemcpyHostToDevice);
}
~Data() {
hipFree(x);
hipFree(y);
}
void clear() {
hipMemset(x, 0, bytes);
hipMemset(y, 0, bytes);
}
float* x{nullptr};
float* y{nullptr};
int size{0};
int bytes{0};
};
__device__ float
squared_l2_distance(float x_1, float y_1, float x_2, float y_2) {
return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2);
}
// In the assignment step, each point (thread) computes its distance to each
// cluster centroid and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(const thrust::device_ptr<float> data_x,
const thrust::device_ptr<float> data_y,
int data_size,
const thrust::device_ptr<float> means_x,
const thrust::device_ptr<float> means_y,
thrust::device_ptr<float> new_sums_x,
thrust::device_ptr<float> new_sums_y,
int k,
thrust::device_ptr<int> counts,
int* clusterNo) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
// int* clusterNo;
// Make global loads once.
const float x = data_x[index];
const float y = data_y[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance =
squared_l2_distance(x, y, means_x[cluster], means_y[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
clusterNo[index] = best_cluster;
}
}
atomicAdd(thrust::raw_pointer_cast(new_sums_x + best_cluster), x);
atomicAdd(thrust::raw_pointer_cast(new_sums_y + best_cluster), y);
atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(thrust::device_ptr<float> means_x,
thrust::device_ptr<float> means_y,
const thrust::device_ptr<float> new_sum_x,
const thrust::device_ptr<float> new_sum_y,
const thrust::device_ptr<int> counts) {
const int cluster = threadIdx.x;
const int count = max(1, counts[cluster]);
means_x[cluster] = new_sum_x[cluster] / count;
means_y[cluster] = new_sum_y[cluster] / count;
}
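// Driver: generates N = 1024 << SHIFT synthetic points whose x and y coordinates
// fall into one of four 100-wide bands chosen by index mod 4 (so the data forms
// k = 4 natural clusters), runs k-means on the GPU, and writes "x,y,cluster"
// lines to the file "tmp".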
int main(int argc, const char* argv[]) {
// std::vector<float> h_x;
// std::vector<float> h_y;
// Load x and y into host vectors ... (omitted)
int SHIFT = atoi(argv[1]);
int k = 4;
int number_of_iterations = 1000;
int N = 1024 << SHIFT;
cout << N << endl;
std::vector<float> h_x(N);
std::vector<float> h_y(N);
std::random_device rnd;
std::mt19937 mt(rnd());
std::uniform_int_distribution<> rand100(0, 99);
std::uniform_int_distribution<> rand200(100, 199);
std::uniform_int_distribution<> rand300(200, 299);
std::uniform_int_distribution<> rand400(300, 399);
for(int i = 0; i < N; i++)
{
if (i % 4 == 0)
h_x[i] = rand100(mt);
if (i % 4 == 1)
h_x[i] = rand200(mt);
if (i % 4 == 2)
h_x[i] = rand300(mt);
if (i % 4 == 3)
h_x[i] = rand400(mt);
if (i % 4 == 0)
h_y[i] = rand100(mt);
if (i % 4 == 1)
h_y[i] = rand200(mt);
if (i % 4 == 2)
h_y[i] = rand300(mt);
if (i % 4 == 3)
h_y[i] = rand400(mt);
}
/*
int N = atoi(argv[2]);
int k = 3;
int number_of_iterations = 1000;
const string csv_file = std::string(argv[1]);
vector<vector<string>> data2;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data2)) {
cout << "read ERROR" << endl;
return 1;
}
for (int row = 0; row < 1024; row++) {
vector<string> rec = data2[row];
h_x.push_back(std::stof(rec[0]));
h_y.push_back(std::stof(rec[1]));
}
*/
const size_t number_of_elements = h_x.size();
int* h_clusterNo;
h_clusterNo = (int *)malloc(N * sizeof(int));
int* d_clusterNo;
hipMalloc(&d_clusterNo, N * sizeof(int));
hipMemset(d_clusterNo, 0, N * sizeof(int));
thrust::device_vector<float> d_x = h_x;
thrust::device_vector<float> d_y = h_y;
// Pick the k initial means from shuffled copies of the data so that h_x/h_y keep
// their original order and stay aligned with h_clusterNo when written out below.
std::mt19937 rng(std::random_device{}());
std::vector<float> init_x(h_x);
std::vector<float> init_y(h_y);
std::shuffle(init_x.begin(), init_x.end(), rng);
std::shuffle(init_y.begin(), init_y.end(), rng);
thrust::device_vector<float> d_mean_x(init_x.begin(), init_x.begin() + k);
thrust::device_vector<float> d_mean_y(init_y.begin(), init_y.begin() + k);
thrust::device_vector<float> d_sums_x(k);
thrust::device_vector<float> d_sums_y(k);
thrust::device_vector<int> d_counts(k, 0);
const int threads = 1024;
const int blocks = (number_of_elements + threads - 1) / threads;
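// Main k-means loop: each iteration zeroes the per-cluster sums and counts,
// assigns every point to its nearest centroid (accumulating sums and counts with
// atomics), then recomputes each centroid as the mean of its assigned points
// using one thread per cluster.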
for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) {
thrust::fill(d_sums_x.begin(), d_sums_x.end(), 0);
thrust::fill(d_sums_y.begin(), d_sums_y.end(), 0);
thrust::fill(d_counts.begin(), d_counts.end(), 0);
hipLaunchKernelGGL(( assign_clusters), dim3(blocks), dim3(threads), 0, 0, d_x.data(),
d_y.data(),
number_of_elements,
d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
k,
d_counts.data(),
d_clusterNo);
hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_new_means), dim3(1), dim3(k), 0, 0, d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
d_counts.data());
hipDeviceSynchronize();
}
hipMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), hipMemcpyDeviceToHost);
std::remove("tmp");
ofstream outputfile("tmp");
for(int i=0; i < N; i++)
{
outputfile << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
// std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
}
/*
hipMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), hipMemcpyDeviceToHost);
for(int i=0; i < N; i++)
std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
*/
}
|
c4b831cbcfbcf93bb803e8c323092ac52670f2de.cu
|
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <random>
#include <vector>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <random>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
using namespace std;
// A small data structure to do RAII for a dataset of 2-dimensional points.
struct Data {
explicit Data(int size) : size(size), bytes(size * sizeof(float)) {
cudaMalloc(&x, bytes);
cudaMalloc(&y, bytes);
}
Data(int size, std::vector<float>& h_x, std::vector<float>& h_y)
: size(size), bytes(size * sizeof(float)) {
cudaMalloc(&x, bytes);
cudaMalloc(&y, bytes);
cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice);
}
~Data() {
cudaFree(x);
cudaFree(y);
}
void clear() {
cudaMemset(x, 0, bytes);
cudaMemset(y, 0, bytes);
}
float* x{nullptr};
float* y{nullptr};
int size{0};
int bytes{0};
};
__device__ float
squared_l2_distance(float x_1, float y_1, float x_2, float y_2) {
return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2);
}
// In the assignment step, each point (thread) computes its distance to each
// cluster centroid and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(const thrust::device_ptr<float> data_x,
const thrust::device_ptr<float> data_y,
int data_size,
const thrust::device_ptr<float> means_x,
const thrust::device_ptr<float> means_y,
thrust::device_ptr<float> new_sums_x,
thrust::device_ptr<float> new_sums_y,
int k,
thrust::device_ptr<int> counts,
int* clusterNo) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
// int* clusterNo;
// Make global loads once.
const float x = data_x[index];
const float y = data_y[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance =
squared_l2_distance(x, y, means_x[cluster], means_y[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
clusterNo[index] = best_cluster;
}
}
atomicAdd(thrust::raw_pointer_cast(new_sums_x + best_cluster), x);
atomicAdd(thrust::raw_pointer_cast(new_sums_y + best_cluster), y);
atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(thrust::device_ptr<float> means_x,
thrust::device_ptr<float> means_y,
const thrust::device_ptr<float> new_sum_x,
const thrust::device_ptr<float> new_sum_y,
const thrust::device_ptr<int> counts) {
const int cluster = threadIdx.x;
const int count = max(1, counts[cluster]);
means_x[cluster] = new_sum_x[cluster] / count;
means_y[cluster] = new_sum_y[cluster] / count;
}
int main(int argc, const char* argv[]) {
// std::vector<float> h_x;
// std::vector<float> h_y;
// Load x and y into host vectors ... (omitted)
int SHIFT = atoi(argv[1]);
int k = 4;
int number_of_iterations = 1000;
int N = 1024 << SHIFT;
cout << N << endl;
std::vector<float> h_x(N);
std::vector<float> h_y(N);
std::random_device rnd;
std::mt19937 mt(rnd());
std::uniform_int_distribution<> rand100(0, 99);
std::uniform_int_distribution<> rand200(100, 199);
std::uniform_int_distribution<> rand300(200, 299);
std::uniform_int_distribution<> rand400(300, 399);
for(int i = 0; i < N; i++)
{
if (i % 4 == 0)
h_x[i] = rand100(mt);
if (i % 4 == 1)
h_x[i] = rand200(mt);
if (i % 4 == 2)
h_x[i] = rand300(mt);
if (i % 4 == 3)
h_x[i] = rand400(mt);
if (i % 4 == 0)
h_y[i] = rand100(mt);
if (i % 4 == 1)
h_y[i] = rand200(mt);
if (i % 4 == 2)
h_y[i] = rand300(mt);
if (i % 4 == 3)
h_y[i] = rand400(mt);
}
/*
int N = atoi(argv[2]);
int k = 3;
int number_of_iterations = 1000;
const string csv_file = std::string(argv[1]);
vector<vector<string>> data2;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data2)) {
cout << "read ERROR" << endl;
return 1;
}
for (int row = 0; row < 1024; row++) {
vector<string> rec = data2[row];
h_x.push_back(std::stof(rec[0]));
h_y.push_back(std::stof(rec[1]));
}
*/
const size_t number_of_elements = h_x.size();
int* h_clusterNo;
h_clusterNo = (int *)malloc(N * sizeof(int));
int* d_clusterNo;
cudaMalloc(&d_clusterNo, N * sizeof(int));
cudaMemset(d_clusterNo, 0, N * sizeof(int));
thrust::device_vector<float> d_x = h_x;
thrust::device_vector<float> d_y = h_y;
// Pick the k initial means from shuffled copies of the data so that h_x/h_y keep
// their original order and stay aligned with h_clusterNo when written out below.
std::mt19937 rng(std::random_device{}());
std::vector<float> init_x(h_x);
std::vector<float> init_y(h_y);
std::shuffle(init_x.begin(), init_x.end(), rng);
std::shuffle(init_y.begin(), init_y.end(), rng);
thrust::device_vector<float> d_mean_x(init_x.begin(), init_x.begin() + k);
thrust::device_vector<float> d_mean_y(init_y.begin(), init_y.begin() + k);
thrust::device_vector<float> d_sums_x(k);
thrust::device_vector<float> d_sums_y(k);
thrust::device_vector<int> d_counts(k, 0);
const int threads = 1024;
const int blocks = (number_of_elements + threads - 1) / threads;
for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) {
thrust::fill(d_sums_x.begin(), d_sums_x.end(), 0);
thrust::fill(d_sums_y.begin(), d_sums_y.end(), 0);
thrust::fill(d_counts.begin(), d_counts.end(), 0);
assign_clusters<<<blocks, threads>>>(d_x.data(),
d_y.data(),
number_of_elements,
d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
k,
d_counts.data(),
d_clusterNo);
cudaDeviceSynchronize();
compute_new_means<<<1, k>>>(d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
d_counts.data());
cudaDeviceSynchronize();
}
cudaMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), cudaMemcpyDeviceToHost);
std::remove("tmp");
ofstream outputfile("tmp");
for(int i=0; i < N; i++)
{
outputfile << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
// std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
}
/*
cudaMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0; i < N; i++)
std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
*/
}
|