hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
a2236a0d752d2f1e60afbd94398d5085657da074.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ double dev_f_B_3(double X)
{
double absV=fabs(X);
if(absV<2)
{
if(absV>=1)
return 0.25*(2.0-absV)*(2.0-absV)*(2.0-absV);
else
return 1.0 - 1.5*absV*absV*(1.0 - 0.5*absV);
}
return 0.0;
}
__host__ double host_f_B_3(double X)
{
double absV=fabs(X);
if(absV<2)
{
if(absV>=1)
{
return 0.25*(2.0-absV)*(2.0-absV)*(2.0-absV);
}
else
return 1.0 - 1.5*absV*absV*(1.0 - 0.5*absV);
}
return 0.0;
}
__device__ double (*phi)(double, double, int);
__device__ double phi_b3 (double x, double y, int n)
{
//polynomial basis
// return pow(x,n%N)*pow(y,n/N);
double cubic_stepx = (X1-X0)/(double)(N-1);
double cubic_stepy = (Y1-Y0)/(double)(N-1);
return dev_f_B_3((N-1)/(X1-X0)*(x-X0-cubic_stepx*(double)(n%(N))))*
dev_f_B_3((N-1)/(Y1-Y0)*(y-Y0-cubic_stepy*(double)(n/(N))));
}
__global__ void set_b3()
{
phi = &phi_b3;
}
__device__ double omega(double x, double y)
{
return (x-X0)*(x-X1)*(y-Y0)*(y-Y1);
}
__device__ double structure(double x, double y, int n)
{
//structure for 1st boundary problem
return phi(x,y,n)*omega(x,y);
//structure for 2nd boundary problem
// return phi(x,y,n)-omega(x,y)*
// ((omega(x+diff_step,y)-omega(x-diff_step,y))*(phi(x+diff_step,y,n)-phi(x-diff_step,y,n))
// +(omega(x,y+diff_step)-omega(x,y-diff_step))*(phi(x,y+diff_step,n)-phi(x,y-diff_step,n)))*
// glob_delta*glob_delta*0.25;
}
__device__ double right_part_f(double x, double y)
{
return 12.*(y*y*(x*x*x*x-1.) + x*x*(y*y*y*y-1.));
}
__host__ double Hphi (double x, double y, int n)
{
//polynomial basis
// return pow(x,n%N)*pow(y,n/N);
double cubic_stepx = (X1-X0)/(double)(N-1);
double cubic_stepy = (Y1-Y0)/(double)(N-1);
return host_f_B_3((N-1)/(X1-X0)*(x-X0-cubic_stepx*(double)(n%(N))))*
host_f_B_3((N-1)/(Y1-Y0)*(y-Y0-cubic_stepy*(double)(n/(N))));
}
__host__ double Homega(double x, double y)
{
return (x-X0)*(x-X1)*(y-Y0)*(y-Y1);
}
__host__ double Hstructure(double x, double y, int n)
{
//structure for 1st boundary problem
return Hphi(x,y,n)*Homega(x,y);
//structure for 2nd boundary problem
// return Hphi(x,y,n)-Homega(x,y)*
// ((Homega(x+diff_step,y)-Homega(x-diff_step,y))*(Hphi(x+diff_step,y,n)-Hphi(x-diff_step,y,n))
// +(Homega(x,y+diff_step)-Homega(x,y-diff_step))*(Hphi(x,y+diff_step,n)-Hphi(x,y-diff_step,n)))*
// glob_delta*glob_delta*0.25;
}
|
a2236a0d752d2f1e60afbd94398d5085657da074.cu
|
__device__ double dev_f_B_3(double X)
{
double absV=fabs(X);
if(absV<2)
{
if(absV>=1)
return 0.25*(2.0-absV)*(2.0-absV)*(2.0-absV);
else
return 1.0 - 1.5*absV*absV*(1.0 - 0.5*absV);
}
return 0.0;
}
__host__ double host_f_B_3(double X)
{
double absV=fabs(X);
if(absV<2)
{
if(absV>=1)
{
return 0.25*(2.0-absV)*(2.0-absV)*(2.0-absV);
}
else
return 1.0 - 1.5*absV*absV*(1.0 - 0.5*absV);
}
return 0.0;
}
__device__ double (*phi)(double, double, int);
__device__ double phi_b3 (double x, double y, int n)
{
//polynomial basis
// return pow(x,n%N)*pow(y,n/N);
double cubic_stepx = (X1-X0)/(double)(N-1);
double cubic_stepy = (Y1-Y0)/(double)(N-1);
return dev_f_B_3((N-1)/(X1-X0)*(x-X0-cubic_stepx*(double)(n%(N))))*
dev_f_B_3((N-1)/(Y1-Y0)*(y-Y0-cubic_stepy*(double)(n/(N))));
}
__global__ void set_b3()
{
phi = &phi_b3;
}
__device__ double omega(double x, double y)
{
return (x-X0)*(x-X1)*(y-Y0)*(y-Y1);
}
__device__ double structure(double x, double y, int n)
{
//structure for 1st boundary problem
return phi(x,y,n)*omega(x,y);
//structure for 2nd boundary problem
// return phi(x,y,n)-omega(x,y)*
// ((omega(x+diff_step,y)-omega(x-diff_step,y))*(phi(x+diff_step,y,n)-phi(x-diff_step,y,n))
// +(omega(x,y+diff_step)-omega(x,y-diff_step))*(phi(x,y+diff_step,n)-phi(x,y-diff_step,n)))*
// glob_delta*glob_delta*0.25;
}
__device__ double right_part_f(double x, double y)
{
return 12.*(y*y*(x*x*x*x-1.) + x*x*(y*y*y*y-1.));
}
__host__ double Hphi (double x, double y, int n)
{
//polynomial basis
// return pow(x,n%N)*pow(y,n/N);
double cubic_stepx = (X1-X0)/(double)(N-1);
double cubic_stepy = (Y1-Y0)/(double)(N-1);
return host_f_B_3((N-1)/(X1-X0)*(x-X0-cubic_stepx*(double)(n%(N))))*
host_f_B_3((N-1)/(Y1-Y0)*(y-Y0-cubic_stepy*(double)(n/(N))));
}
__host__ double Homega(double x, double y)
{
return (x-X0)*(x-X1)*(y-Y0)*(y-Y1);
}
__host__ double Hstructure(double x, double y, int n)
{
//structure for 1st boundary problem
return Hphi(x,y,n)*Homega(x,y);
//structure for 2nd boundary problem
// return Hphi(x,y,n)-Homega(x,y)*
// ((Homega(x+diff_step,y)-Homega(x-diff_step,y))*(Hphi(x+diff_step,y,n)-Hphi(x-diff_step,y,n))
// +(Homega(x,y+diff_step)-Homega(x,y-diff_step))*(Hphi(x,y+diff_step,n)-Hphi(x,y-diff_step,n)))*
// glob_delta*glob_delta*0.25;
}
|
332fca6e8c47c626d1f94ce8761f9b5aa8c07b7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
namespace oneflow {
namespace {
const int32_t NDIMS = 16;
struct SIZE_V {
int32_t val[NDIMS];
};
struct VIS {
bool val[NDIMS] = {false};
};
template<typename T>
__global__ void FlipGpuForward(const int32_t element, const int64_t total_dims,
const SIZE_V stride_contiguous_v, const SIZE_V sizes_v,
const VIS vis, SIZE_V strides_v, const T* in_dptr, T* out_dptr) {
CUDA_1D_KERNEL_LOOP(i, element) {
int32_t cur_indices = i;
int32_t rem = 0;
int32_t dst_offset = 0;
for (int32_t d = 0; d < total_dims; d++) {
int32_t temp = cur_indices;
cur_indices = cur_indices / stride_contiguous_v.val[d];
rem = temp - cur_indices * stride_contiguous_v.val[d];
dst_offset += vis.val[d] ? (sizes_v.val[d] - 1 - cur_indices) * strides_v.val[d]
: cur_indices * strides_v.val[d];
cur_indices = rem;
}
out_dptr[i] = in_dptr[dst_offset];
}
}
} // namespace
template<typename T>
class FlipGpuKernel final : public user_op::OpKernel {
public:
FlipGpuKernel() = default;
~FlipGpuKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0);
const int32_t elem_cnt = y_tensor->shape().elem_cnt();
const int32_t total_dims = y_tensor->shape().NumAxes();
std::vector<int32_t> dims = ctx->Attr<std::vector<int32_t>>("dims");
VIS vis;
for (auto x : dims) { vis.val[x] = true; }
SIZE_V sizes_v;
for (int32_t i = 0; i < total_dims; i++) { sizes_v.val[i] = y_tensor->shape().At(i); }
// TODO(bbuf) delete strides calculation after tensor strides are supported
SIZE_V strides_v;
strides_v.val[total_dims - 1] = 1;
for (int32_t i = total_dims - 2; i >= 0; i--) {
strides_v.val[i] = strides_v.val[i + 1] * y_tensor->shape().At(i + 1);
}
SIZE_V stride_contiguous_v;
for (int32_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_v.val[i] = 1;
} else {
stride_contiguous_v.val[i] =
std::max<int32_t>(x_tensor->shape().At(i + 1), 1) * stride_contiguous_v.val[i + 1];
}
}
RUN_CUDA_KERNEL((FlipGpuForward<T>), ctx->stream(), elem_cnt, elem_cnt, total_dims,
stride_contiguous_v, sizes_v, vis, strides_v, x_tensor->dptr<T>(),
y_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class FlipGrad1DGpuKernel final : public user_op::OpKernel {
public:
FlipGrad1DGpuKernel() = default;
~FlipGrad1DGpuKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0);
Memset<DeviceType::kGPU>(ctx->stream(), dx_tensor->mut_dptr<T>(), 0,
dx_tensor->shape().elem_cnt() * sizeof(T));
const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0);
Memcpy<DeviceType::kGPU>(
ctx->stream(), dx_tensor->mut_dptr<void>(), dy_tensor->dptr<void>(),
dy_tensor->shape().elem_cnt() * GetSizeOfDataType(dy_tensor->data_type()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FLIP_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("flip").SetCreateFn<FlipGpuKernel<dtype>>().SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \
REGISTER_USER_KERNEL("flip_grad") \
.SetCreateFn<FlipGrad1DGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value));
REGISTER_FLIP_GPU_KERNEL(float)
REGISTER_FLIP_GPU_KERNEL(double)
REGISTER_FLIP_GPU_KERNEL(uint8_t)
REGISTER_FLIP_GPU_KERNEL(int8_t)
REGISTER_FLIP_GPU_KERNEL(int32_t)
REGISTER_FLIP_GPU_KERNEL(int64_t)
} // namespace oneflow
|
332fca6e8c47c626d1f94ce8761f9b5aa8c07b7b.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
namespace oneflow {
namespace {
const int32_t NDIMS = 16;
struct SIZE_V {
int32_t val[NDIMS];
};
struct VIS {
bool val[NDIMS] = {false};
};
template<typename T>
__global__ void FlipGpuForward(const int32_t element, const int64_t total_dims,
const SIZE_V stride_contiguous_v, const SIZE_V sizes_v,
const VIS vis, SIZE_V strides_v, const T* in_dptr, T* out_dptr) {
CUDA_1D_KERNEL_LOOP(i, element) {
int32_t cur_indices = i;
int32_t rem = 0;
int32_t dst_offset = 0;
for (int32_t d = 0; d < total_dims; d++) {
int32_t temp = cur_indices;
cur_indices = cur_indices / stride_contiguous_v.val[d];
rem = temp - cur_indices * stride_contiguous_v.val[d];
dst_offset += vis.val[d] ? (sizes_v.val[d] - 1 - cur_indices) * strides_v.val[d]
: cur_indices * strides_v.val[d];
cur_indices = rem;
}
out_dptr[i] = in_dptr[dst_offset];
}
}
} // namespace
template<typename T>
class FlipGpuKernel final : public user_op::OpKernel {
public:
FlipGpuKernel() = default;
~FlipGpuKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0);
const int32_t elem_cnt = y_tensor->shape().elem_cnt();
const int32_t total_dims = y_tensor->shape().NumAxes();
std::vector<int32_t> dims = ctx->Attr<std::vector<int32_t>>("dims");
VIS vis;
for (auto x : dims) { vis.val[x] = true; }
SIZE_V sizes_v;
for (int32_t i = 0; i < total_dims; i++) { sizes_v.val[i] = y_tensor->shape().At(i); }
// TODO(bbuf) delete strides calculation after tensor strides are supported
SIZE_V strides_v;
strides_v.val[total_dims - 1] = 1;
for (int32_t i = total_dims - 2; i >= 0; i--) {
strides_v.val[i] = strides_v.val[i + 1] * y_tensor->shape().At(i + 1);
}
SIZE_V stride_contiguous_v;
for (int32_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_v.val[i] = 1;
} else {
stride_contiguous_v.val[i] =
std::max<int32_t>(x_tensor->shape().At(i + 1), 1) * stride_contiguous_v.val[i + 1];
}
}
RUN_CUDA_KERNEL((FlipGpuForward<T>), ctx->stream(), elem_cnt, elem_cnt, total_dims,
stride_contiguous_v, sizes_v, vis, strides_v, x_tensor->dptr<T>(),
y_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class FlipGrad1DGpuKernel final : public user_op::OpKernel {
public:
FlipGrad1DGpuKernel() = default;
~FlipGrad1DGpuKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0);
Memset<DeviceType::kGPU>(ctx->stream(), dx_tensor->mut_dptr<T>(), 0,
dx_tensor->shape().elem_cnt() * sizeof(T));
const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0);
Memcpy<DeviceType::kGPU>(
ctx->stream(), dx_tensor->mut_dptr<void>(), dy_tensor->dptr<void>(),
dy_tensor->shape().elem_cnt() * GetSizeOfDataType(dy_tensor->data_type()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FLIP_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("flip").SetCreateFn<FlipGpuKernel<dtype>>().SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \
REGISTER_USER_KERNEL("flip_grad") \
.SetCreateFn<FlipGrad1DGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value));
REGISTER_FLIP_GPU_KERNEL(float)
REGISTER_FLIP_GPU_KERNEL(double)
REGISTER_FLIP_GPU_KERNEL(uint8_t)
REGISTER_FLIP_GPU_KERNEL(int8_t)
REGISTER_FLIP_GPU_KERNEL(int32_t)
REGISTER_FLIP_GPU_KERNEL(int64_t)
} // namespace oneflow
|
f3e1cff1cc0ee2ff48ac702e89fc2ad7d017960c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "particles.cuh"
__device__ int position_to_grid_index(float x, float dx){
return x/dx;
}
__device__ float position_in_cell(float x, float dx){
return x - dx * (int)(x/dx);
}
__device__ int ijk_to_n(int i, int j, int k, int N_grid){
return (N_grid * N_grid * (k%N_grid) + N_grid * (j%N_grid) + (i%N_grid));
}
__global__ void InitParticleArrays(Particle *d_p, float shiftx, float shifty,
float shiftz, float vx, float vy, float vz, int N_particles_1_axis, int N_particles){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if (n<N_particles){
Particle *p = &(d_p[n]);
int i = n / (int)(N_particles_1_axis*N_particles_1_axis);
int j = (int) (n/N_particles_1_axis) % N_particles_1_axis;
int k = n % N_particles_1_axis;
p->x = L/float(N_particles_1_axis) * i;
p->x += shiftx*sin(2*M_PI/L*p->x);
p->x = p->x - floor(p->x/L)*L;
p->y = L/float(N_particles_1_axis) * j;
p->y += shifty*sin(2*M_PI/L*p->y);
p->y = p->y - floor(p->y/L)*L;
p->z = L/float(N_particles_1_axis) * k;
p->z += shiftz*sin(2*M_PI/L*p->z);
p->z = p->z - floor(p->z/L)*L;
p->vx = vx;
p->vy = vy;
p->vz = vz;
// if (threadIdx.x == 0)
// {
// printf("%d %f %f %f\n", blockIdx.x, p->x, p->y, p->z);
// }
}
}
void init_species(Species *s, float shiftx, float shifty, float shiftz,
float vx, float vy, float vz,
int N_particles_1_axis, int N_grid, float dx){
s->N_particles_1_axis = N_particles_1_axis;
s->N_particles = N_particles_1_axis * N_particles_1_axis * N_particles_1_axis;
s->particles = new Particle[s->N_particles];
s->particleThreads = dim3(pThreads);
s->particleBlocks = dim3((s->N_particles+s->particleThreads.x - 1)/s->particleThreads.x);
s->KE = 0;
s->Px = 0;
s->Py = 0;
s->Pz = 0;
s->block_v2s = new float[s->particleBlocks.x];
CUDA_ERROR(hipMalloc((void**)&(s->d_block_v2s), sizeof(float)*s->particleBlocks.x));
s->block_Px = new float[s->particleBlocks.x];
CUDA_ERROR(hipMalloc((void**)&(s->d_block_Px), sizeof(float)*s->particleBlocks.x));
s->block_Py = new float[s->particleBlocks.x];
CUDA_ERROR(hipMalloc((void**)&(s->d_block_Py), sizeof(float)*s->particleBlocks.x));
s->block_Pz = new float[s->particleBlocks.x];
CUDA_ERROR(hipMalloc((void**)&(s->d_block_Pz), sizeof(float)*s->particleBlocks.x));
CUDA_ERROR(hipMalloc((void**)&(s->d_sum_Px), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(hipMalloc((void**)&(s->d_sum_Py), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(hipMalloc((void**)&(s->d_sum_Pz), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(hipMalloc((void**)&(s->d_sum_v2s), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(hipMalloc((void**)&(s->d_moments), sizeof(float)*4));
s->moments = new float[4];
CUDA_ERROR(hipMalloc((void**)&(s->d_particles), sizeof(Particle)*s->N_particles));
printf("initializing particles\n");
hipLaunchKernelGGL(( InitParticleArrays), dim3(s->particleBlocks), dim3(s->particleThreads), 0, 0, s->d_particles, shiftx, shifty, shiftz, vx, vy, vz, s->N_particles_1_axis, s->N_particles);
printf("Blocks: %d %d %d Threads: %d %d %d \n",
s->particleBlocks.x,
s->particleBlocks.y,
s->particleBlocks.z,
s->particleThreads.x,
s->particleThreads.y,
s->particleThreads.z);
printf("Mass: %f Charge: %f N: %ld\n", s->m, s->q, s->N_particles);
}
__global__ void scatter_charge_kernel(Particle *d_P, float q, float* d_rho, int N_grid, float dx, int N_particles){
// __shared__ float local_rho[32*32*32];
int n = blockIdx.x*blockDim.x + threadIdx.x;
if (n < N_particles){
float x = d_P[n].x;
float y = d_P[n].y;
float z = d_P[n].z;
int i = position_to_grid_index(x, dx);
int j = position_to_grid_index(y, dx);
int k = position_to_grid_index(z, dx);
float Xr = position_in_cell(x, dx)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y, dx)/dx;
float Yl = 1 - Yr;
float Zr = position_in_cell(z, dx)/dx;
float Zl = 1 - Zr;
//TODO: redo this using a reduce
atomicAdd(&(d_rho[ijk_to_n(i, j, k, N_grid)]), q*Xl*Yl*Zl);
atomicAdd(&(d_rho[ijk_to_n(i+1, j, k, N_grid)]), q*Xr*Yl*Zl);
atomicAdd(&(d_rho[ijk_to_n(i, j+1, k, N_grid)]), q*Xl*Yr*Zl);
atomicAdd(&(d_rho[ijk_to_n(i, j, k+1, N_grid)]), q*Xl*Yl*Zr);
atomicAdd(&(d_rho[ijk_to_n(i+1, j+1, k, N_grid)]), q*Xr*Yr*Zl);
atomicAdd(&(d_rho[ijk_to_n(i+1, j, k+1, N_grid)]), q*Xr*Yl*Zr);
atomicAdd(&(d_rho[ijk_to_n(i, j+1, k+1, N_grid)]), q*Xl*Yr*Zr);
atomicAdd(&(d_rho[ijk_to_n(i+1, j+1, k+1, N_grid)]), q*Xr*Yr*Zr);
}
}
void scatter_charge(Species *s, Grid *g)
{
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( scatter_charge_kernel), dim3(s->particleBlocks), dim3(s->particleThreads), 0, 0, s->d_particles,
s->q, g->d_rho, g->N_grid, g->dx, s->N_particles);
}
__device__ float gather_grid_to_particle(Particle *p, float *grid, int N_grid, float dx){
float x = p->x;
float y = p->y;
float z = p->z;
int i = position_to_grid_index(x, dx);
int j = position_to_grid_index(y, dx);
int k = position_to_grid_index(z, dx);
float Xr = position_in_cell(x, dx)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y, dx)/dx;
float Yl = 1 - Yr;
float Zr = position_in_cell(z, dx)/dx;
float Zl = 1 - Zr;
float interpolated_scalar = 0.0f;
interpolated_scalar += grid[ijk_to_n(i, j, k, N_grid)] *Xl*Yl*Zl;
interpolated_scalar += grid[ijk_to_n(i+1, j, k, N_grid)] *Xr*Yl*Zl;
interpolated_scalar += grid[ijk_to_n(i, j+1, k, N_grid)] *Xl*Yr*Zl;
interpolated_scalar += grid[ijk_to_n(i, j, k+1, N_grid)] *Xl*Yl*Zr;
interpolated_scalar += grid[ijk_to_n(i+1, j+1, k, N_grid)] *Xr*Yr*Zl;
interpolated_scalar += grid[ijk_to_n(i+1, j, k+1, N_grid)] *Xr*Yl*Zr;
interpolated_scalar += grid[ijk_to_n(i, j+1, k+1, N_grid)] *Xl*Yr*Zr;
interpolated_scalar += grid[ijk_to_n(i+1, j+1, k+1, N_grid)]*Xr*Yr*Zr;
return interpolated_scalar;
}
__global__ void InitialVelocityStep_kernel(Particle *d_p, float q, float m, float *d_Ex,
float *d_Ey, float *d_Ez, int N_particles, int N_grid, float dx, float dt){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if(n<N_particles)
{
Particle *p = &(d_p[n]);
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex, N_grid, dx);
float Ey = gather_grid_to_particle(p, d_Ey, N_grid, dx);
float Ez = gather_grid_to_particle(p, d_Ez, N_grid, dx);
//use electric field to accelerate particles
p->vx -= 0.5f*dt*q/m*Ex;
p->vy -= 0.5f*dt*q/m*Ey;
p->vz -= 0.5f*dt*q/m*Ez;
}
}
void InitialVelocityStep(Species *s, Grid *g, float dt)
{
hipLaunchKernelGGL(( InitialVelocityStep_kernel), dim3(s->particleBlocks), dim3(s->particleThreads), 0, 0, s->d_particles,
s->q, s->m, g->d_Ex, g->d_Ey, g->d_Ez, s->N_particles, g->N_grid, g->dx, dt);
}
__global__ void ParticleKernel(Particle *d_p, float q, float m,
float *d_Ex, float *d_Ey, float *d_Ez, int N_particles, int N_grid, float dx, float dt,
float* d_block_v2s, float* d_block_Px, float* d_block_Py, float* d_block_Pz){
__shared__ float v2_array[pThreads];
__shared__ float Px_array[pThreads];
__shared__ float Py_array[pThreads];
__shared__ float Pz_array[pThreads];
int n = blockDim.x * blockIdx.x + threadIdx.x;
if(n<N_particles)
{
Particle *p = &(d_p[n]);
//push positions, enforce periodic boundary conditions
p->x = p->x + p->vx*dt;
p->x = p->x - floor(p->x/L)*L;
p->y = p->y + p->vy*dt;
p->y = p->y - floor(p->y/L)*L;
p->z = p->z + p->vz*dt;
p->z = p->z - floor(p->z/L)*L;
float old_vx = p->vx;
float old_vy = p->vy;
float old_vz = p->vz;
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex, N_grid, dx);
float Ey = gather_grid_to_particle(p, d_Ey, N_grid, dx);
float Ez = gather_grid_to_particle(p, d_Ez, N_grid, dx);
//use electric field to accelerate particles
p->vx += dt*q/m*Ex;
p->vy += dt*q/m*Ey;
p->vz += dt*q/m*Ez;
v2_array[threadIdx.x] = old_vx * p->vx + old_vy * p->vy + old_vz * p->vz;
Px_array[threadIdx.x] = old_vx * p->vx;
Py_array[threadIdx.x] = old_vy * p->vy;
Pz_array[threadIdx.x] = old_vz * p->vz;
__syncthreads();
for (int s = pThreads / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
v2_array[threadIdx.x] += v2_array[threadIdx.x + s];
Px_array[threadIdx.x] += Px_array[threadIdx.x + s];
Py_array[threadIdx.x] += Py_array[threadIdx.x + s];
Pz_array[threadIdx.x] += Pz_array[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
d_block_v2s[blockIdx.x] = v2_array[0];
d_block_Px[blockIdx.x] = Px_array[0];
d_block_Py[blockIdx.x] = Py_array[0];
d_block_Pz[blockIdx.x] = Pz_array[0];
}
}
}
__global__ void reduce_moments(float *d_arr, float *d_results, int N)
{
__shared__ float sh_array[pThreads];
int n = blockDim.x * blockIdx.x + threadIdx.x;
// sh_array[threadIdx.x] = 0;
if (n < N){
for (int s = blockDim.x / 2; s > 0; s >>= 1){
if ( threadIdx.x < s)
{
sh_array[threadIdx.x] += d_arr[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x ==0){
d_results[blockIdx.x] = sh_array[0];
// printf("%d %f\n", blockIdx.x, d_results[blockIdx.x]);
}
}
}
void SpeciesPush(Species *s, Grid *g, float dt)
{
hipLaunchKernelGGL(( ParticleKernel), dim3(s->particleBlocks), dim3(s->particleThreads), 0, 0, s->d_particles,
s->q, s->m, g->d_Ex, g->d_Ey, g->d_Ez, s->N_particles, g->N_grid, g->dx, dt,
s->d_block_v2s, s->d_block_Px, s->d_block_Py, s->d_block_Pz);
// printf("%d %d %d\n", (s->particleBlocks.x + pThreads - 1)/pThreads, s->particleThreads.x, (s->N_particles + pThreads - 1)/pThreads);
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( reduce_moments), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), dim3(s->particleThreads), 0, 0, s->d_block_v2s, s->d_sum_v2s, (s->N_particles + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), dim3(s->particleThreads), 0, 0, s->d_block_Px, s->d_sum_Px, (s->N_particles + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), dim3(s->particleThreads), 0, 0, s->d_block_Py, s->d_sum_Py, (s->N_particles + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), dim3(s->particleThreads), 0, 0, s->d_block_Pz, s->d_sum_Pz, (s->N_particles + pThreads - 1)/pThreads);
CUDA_ERROR(hipDeviceSynchronize());
// printf("%d %d %ld\n",1, (s->particleBlocks.x + pThreads - 1)/pThreads, (s->particleBlocks.x + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3(1), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), 0, 0, s->d_sum_v2s, &(s->d_moments[0]), (s->particleBlocks.x + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3(1), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), 0, 0, s->d_sum_Px, &(s->d_moments[1]), (s->particleBlocks.x + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3(1), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), 0, 0, s->d_sum_Py, &(s->d_moments[2]), (s->particleBlocks.x + pThreads - 1)/pThreads);
hipLaunchKernelGGL(( reduce_moments), dim3(1), dim3((s->particleBlocks.x + pThreads - 1)/pThreads), 0, 0, s->d_sum_Pz, &(s->d_moments[3]), (s->particleBlocks.x + pThreads - 1)/pThreads);
CUDA_ERROR(hipMemcpy(s->moments, s->d_moments, sizeof(float)*4, hipMemcpyDeviceToHost));
// printf("%f %f %f %f\n", s->moments[0], s->moments[1], s->moments[2], s->moments[3]);
s->KE = s->moments[0] * 0.5f * s->m;
s->Px = s->moments[1] * s->m;
s->Py = s->moments[2] * s->m;
s->Pz = s->moments[3] * s->m;
// printf("%f %f %f %f\n", s->KE, s->Px, s->Py, s->Pz);
}
void dump_position_data(Species *s, char* name){
// printf("Copying particles from GPU to device\n");
CUDA_ERROR(hipMemcpy(s->particles, s->d_particles, sizeof(Particle)*s->N_particles, hipMemcpyDeviceToHost));
// printf("Copied particles from GPU to device\n");
FILE *initial_position_data = fopen(name, "w");
for (int i =0; i<s->N_particles; i += 51)
{
Particle *p = &(s->particles[i]);
fprintf(initial_position_data, "%f %f %f %f %f %f\n", p->x, p->y, p->z, p->vx, p->vy, p->vz);
}
// free(s->particles);
fclose(initial_position_data);
}
void particle_cleanup(Species *s)
{
CUDA_ERROR(hipFree(s->d_particles));
CUDA_ERROR(hipFree(s->d_block_v2s));
CUDA_ERROR(hipFree(s->d_block_Px));
CUDA_ERROR(hipFree(s->d_block_Py));
CUDA_ERROR(hipFree(s->d_block_Pz));
}
|
f3e1cff1cc0ee2ff48ac702e89fc2ad7d017960c.cu
|
#include "particles.cuh"
__device__ int position_to_grid_index(float x, float dx){
return x/dx;
}
__device__ float position_in_cell(float x, float dx){
return x - dx * (int)(x/dx);
}
__device__ int ijk_to_n(int i, int j, int k, int N_grid){
return (N_grid * N_grid * (k%N_grid) + N_grid * (j%N_grid) + (i%N_grid));
}
__global__ void InitParticleArrays(Particle *d_p, float shiftx, float shifty,
float shiftz, float vx, float vy, float vz, int N_particles_1_axis, int N_particles){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if (n<N_particles){
Particle *p = &(d_p[n]);
int i = n / (int)(N_particles_1_axis*N_particles_1_axis);
int j = (int) (n/N_particles_1_axis) % N_particles_1_axis;
int k = n % N_particles_1_axis;
p->x = L/float(N_particles_1_axis) * i;
p->x += shiftx*sin(2*M_PI/L*p->x);
p->x = p->x - floor(p->x/L)*L;
p->y = L/float(N_particles_1_axis) * j;
p->y += shifty*sin(2*M_PI/L*p->y);
p->y = p->y - floor(p->y/L)*L;
p->z = L/float(N_particles_1_axis) * k;
p->z += shiftz*sin(2*M_PI/L*p->z);
p->z = p->z - floor(p->z/L)*L;
p->vx = vx;
p->vy = vy;
p->vz = vz;
// if (threadIdx.x == 0)
// {
// printf("%d %f %f %f\n", blockIdx.x, p->x, p->y, p->z);
// }
}
}
void init_species(Species *s, float shiftx, float shifty, float shiftz,
float vx, float vy, float vz,
int N_particles_1_axis, int N_grid, float dx){
s->N_particles_1_axis = N_particles_1_axis;
s->N_particles = N_particles_1_axis * N_particles_1_axis * N_particles_1_axis;
s->particles = new Particle[s->N_particles];
s->particleThreads = dim3(pThreads);
s->particleBlocks = dim3((s->N_particles+s->particleThreads.x - 1)/s->particleThreads.x);
s->KE = 0;
s->Px = 0;
s->Py = 0;
s->Pz = 0;
s->block_v2s = new float[s->particleBlocks.x];
CUDA_ERROR(cudaMalloc((void**)&(s->d_block_v2s), sizeof(float)*s->particleBlocks.x));
s->block_Px = new float[s->particleBlocks.x];
CUDA_ERROR(cudaMalloc((void**)&(s->d_block_Px), sizeof(float)*s->particleBlocks.x));
s->block_Py = new float[s->particleBlocks.x];
CUDA_ERROR(cudaMalloc((void**)&(s->d_block_Py), sizeof(float)*s->particleBlocks.x));
s->block_Pz = new float[s->particleBlocks.x];
CUDA_ERROR(cudaMalloc((void**)&(s->d_block_Pz), sizeof(float)*s->particleBlocks.x));
CUDA_ERROR(cudaMalloc((void**)&(s->d_sum_Px), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(cudaMalloc((void**)&(s->d_sum_Py), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(cudaMalloc((void**)&(s->d_sum_Pz), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(cudaMalloc((void**)&(s->d_sum_v2s), sizeof(float)*(s->particleBlocks.x + pThreads - 1)/pThreads));
CUDA_ERROR(cudaMalloc((void**)&(s->d_moments), sizeof(float)*4));
s->moments = new float[4];
CUDA_ERROR(cudaMalloc((void**)&(s->d_particles), sizeof(Particle)*s->N_particles));
printf("initializing particles\n");
InitParticleArrays<<<s->particleBlocks, s->particleThreads>>>(s->d_particles, shiftx, shifty, shiftz, vx, vy, vz, s->N_particles_1_axis, s->N_particles);
printf("Blocks: %d %d %d Threads: %d %d %d \n",
s->particleBlocks.x,
s->particleBlocks.y,
s->particleBlocks.z,
s->particleThreads.x,
s->particleThreads.y,
s->particleThreads.z);
printf("Mass: %f Charge: %f N: %ld\n", s->m, s->q, s->N_particles);
}
__global__ void scatter_charge_kernel(Particle *d_P, float q, float* d_rho, int N_grid, float dx, int N_particles){
// __shared__ float local_rho[32*32*32];
int n = blockIdx.x*blockDim.x + threadIdx.x;
if (n < N_particles){
float x = d_P[n].x;
float y = d_P[n].y;
float z = d_P[n].z;
int i = position_to_grid_index(x, dx);
int j = position_to_grid_index(y, dx);
int k = position_to_grid_index(z, dx);
float Xr = position_in_cell(x, dx)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y, dx)/dx;
float Yl = 1 - Yr;
float Zr = position_in_cell(z, dx)/dx;
float Zl = 1 - Zr;
//TODO: redo this using a reduce
atomicAdd(&(d_rho[ijk_to_n(i, j, k, N_grid)]), q*Xl*Yl*Zl);
atomicAdd(&(d_rho[ijk_to_n(i+1, j, k, N_grid)]), q*Xr*Yl*Zl);
atomicAdd(&(d_rho[ijk_to_n(i, j+1, k, N_grid)]), q*Xl*Yr*Zl);
atomicAdd(&(d_rho[ijk_to_n(i, j, k+1, N_grid)]), q*Xl*Yl*Zr);
atomicAdd(&(d_rho[ijk_to_n(i+1, j+1, k, N_grid)]), q*Xr*Yr*Zl);
atomicAdd(&(d_rho[ijk_to_n(i+1, j, k+1, N_grid)]), q*Xr*Yl*Zr);
atomicAdd(&(d_rho[ijk_to_n(i, j+1, k+1, N_grid)]), q*Xl*Yr*Zr);
atomicAdd(&(d_rho[ijk_to_n(i+1, j+1, k+1, N_grid)]), q*Xr*Yr*Zr);
}
}
void scatter_charge(Species *s, Grid *g)
{
CUDA_ERROR(cudaDeviceSynchronize());
scatter_charge_kernel<<<s->particleBlocks, s->particleThreads>>>(s->d_particles,
s->q, g->d_rho, g->N_grid, g->dx, s->N_particles);
}
__device__ float gather_grid_to_particle(Particle *p, float *grid, int N_grid, float dx){
float x = p->x;
float y = p->y;
float z = p->z;
int i = position_to_grid_index(x, dx);
int j = position_to_grid_index(y, dx);
int k = position_to_grid_index(z, dx);
float Xr = position_in_cell(x, dx)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y, dx)/dx;
float Yl = 1 - Yr;
float Zr = position_in_cell(z, dx)/dx;
float Zl = 1 - Zr;
float interpolated_scalar = 0.0f;
interpolated_scalar += grid[ijk_to_n(i, j, k, N_grid)] *Xl*Yl*Zl;
interpolated_scalar += grid[ijk_to_n(i+1, j, k, N_grid)] *Xr*Yl*Zl;
interpolated_scalar += grid[ijk_to_n(i, j+1, k, N_grid)] *Xl*Yr*Zl;
interpolated_scalar += grid[ijk_to_n(i, j, k+1, N_grid)] *Xl*Yl*Zr;
interpolated_scalar += grid[ijk_to_n(i+1, j+1, k, N_grid)] *Xr*Yr*Zl;
interpolated_scalar += grid[ijk_to_n(i+1, j, k+1, N_grid)] *Xr*Yl*Zr;
interpolated_scalar += grid[ijk_to_n(i, j+1, k+1, N_grid)] *Xl*Yr*Zr;
interpolated_scalar += grid[ijk_to_n(i+1, j+1, k+1, N_grid)]*Xr*Yr*Zr;
return interpolated_scalar;
}
__global__ void InitialVelocityStep_kernel(Particle *d_p, float q, float m, float *d_Ex,
float *d_Ey, float *d_Ez, int N_particles, int N_grid, float dx, float dt){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if(n<N_particles)
{
Particle *p = &(d_p[n]);
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex, N_grid, dx);
float Ey = gather_grid_to_particle(p, d_Ey, N_grid, dx);
float Ez = gather_grid_to_particle(p, d_Ez, N_grid, dx);
//use electric field to accelerate particles
p->vx -= 0.5f*dt*q/m*Ex;
p->vy -= 0.5f*dt*q/m*Ey;
p->vz -= 0.5f*dt*q/m*Ez;
}
}
void InitialVelocityStep(Species *s, Grid *g, float dt)
{
InitialVelocityStep_kernel<<<s->particleBlocks, s->particleThreads>>>(s->d_particles,
s->q, s->m, g->d_Ex, g->d_Ey, g->d_Ez, s->N_particles, g->N_grid, g->dx, dt);
}
__global__ void ParticleKernel(Particle *d_p, float q, float m,
float *d_Ex, float *d_Ey, float *d_Ez, int N_particles, int N_grid, float dx, float dt,
float* d_block_v2s, float* d_block_Px, float* d_block_Py, float* d_block_Pz){
__shared__ float v2_array[pThreads];
__shared__ float Px_array[pThreads];
__shared__ float Py_array[pThreads];
__shared__ float Pz_array[pThreads];
int n = blockDim.x * blockIdx.x + threadIdx.x;
if(n<N_particles)
{
Particle *p = &(d_p[n]);
//push positions, enforce periodic boundary conditions
p->x = p->x + p->vx*dt;
p->x = p->x - floor(p->x/L)*L;
p->y = p->y + p->vy*dt;
p->y = p->y - floor(p->y/L)*L;
p->z = p->z + p->vz*dt;
p->z = p->z - floor(p->z/L)*L;
float old_vx = p->vx;
float old_vy = p->vy;
float old_vz = p->vz;
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex, N_grid, dx);
float Ey = gather_grid_to_particle(p, d_Ey, N_grid, dx);
float Ez = gather_grid_to_particle(p, d_Ez, N_grid, dx);
//use electric field to accelerate particles
p->vx += dt*q/m*Ex;
p->vy += dt*q/m*Ey;
p->vz += dt*q/m*Ez;
v2_array[threadIdx.x] = old_vx * p->vx + old_vy * p->vy + old_vz * p->vz;
Px_array[threadIdx.x] = old_vx * p->vx;
Py_array[threadIdx.x] = old_vy * p->vy;
Pz_array[threadIdx.x] = old_vz * p->vz;
__syncthreads();
for (int s = pThreads / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
v2_array[threadIdx.x] += v2_array[threadIdx.x + s];
Px_array[threadIdx.x] += Px_array[threadIdx.x + s];
Py_array[threadIdx.x] += Py_array[threadIdx.x + s];
Pz_array[threadIdx.x] += Pz_array[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
d_block_v2s[blockIdx.x] = v2_array[0];
d_block_Px[blockIdx.x] = Px_array[0];
d_block_Py[blockIdx.x] = Py_array[0];
d_block_Pz[blockIdx.x] = Pz_array[0];
}
}
}
__global__ void reduce_moments(float *d_arr, float *d_results, int N)
{
__shared__ float sh_array[pThreads];
int n = blockDim.x * blockIdx.x + threadIdx.x;
// sh_array[threadIdx.x] = 0;
if (n < N){
for (int s = blockDim.x / 2; s > 0; s >>= 1){
if ( threadIdx.x < s)
{
sh_array[threadIdx.x] += d_arr[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x ==0){
d_results[blockIdx.x] = sh_array[0];
// printf("%d %f\n", blockIdx.x, d_results[blockIdx.x]);
}
}
}
void SpeciesPush(Species *s, Grid *g, float dt)
{
ParticleKernel<<<s->particleBlocks, s->particleThreads>>>(s->d_particles,
s->q, s->m, g->d_Ex, g->d_Ey, g->d_Ez, s->N_particles, g->N_grid, g->dx, dt,
s->d_block_v2s, s->d_block_Px, s->d_block_Py, s->d_block_Pz);
// printf("%d %d %d\n", (s->particleBlocks.x + pThreads - 1)/pThreads, s->particleThreads.x, (s->N_particles + pThreads - 1)/pThreads);
CUDA_ERROR(cudaDeviceSynchronize());
reduce_moments<<<(s->particleBlocks.x + pThreads - 1)/pThreads, s->particleThreads>>>(s->d_block_v2s, s->d_sum_v2s, (s->N_particles + pThreads - 1)/pThreads);
reduce_moments<<<(s->particleBlocks.x + pThreads - 1)/pThreads, s->particleThreads>>>(s->d_block_Px, s->d_sum_Px, (s->N_particles + pThreads - 1)/pThreads);
reduce_moments<<<(s->particleBlocks.x + pThreads - 1)/pThreads, s->particleThreads>>>(s->d_block_Py, s->d_sum_Py, (s->N_particles + pThreads - 1)/pThreads);
reduce_moments<<<(s->particleBlocks.x + pThreads - 1)/pThreads, s->particleThreads>>>(s->d_block_Pz, s->d_sum_Pz, (s->N_particles + pThreads - 1)/pThreads);
CUDA_ERROR(cudaDeviceSynchronize());
// printf("%d %d %ld\n",1, (s->particleBlocks.x + pThreads - 1)/pThreads, (s->particleBlocks.x + pThreads - 1)/pThreads);
reduce_moments<<<1, (s->particleBlocks.x + pThreads - 1)/pThreads>>>(s->d_sum_v2s, &(s->d_moments[0]), (s->particleBlocks.x + pThreads - 1)/pThreads);
reduce_moments<<<1, (s->particleBlocks.x + pThreads - 1)/pThreads>>>(s->d_sum_Px, &(s->d_moments[1]), (s->particleBlocks.x + pThreads - 1)/pThreads);
reduce_moments<<<1, (s->particleBlocks.x + pThreads - 1)/pThreads>>>(s->d_sum_Py, &(s->d_moments[2]), (s->particleBlocks.x + pThreads - 1)/pThreads);
reduce_moments<<<1, (s->particleBlocks.x + pThreads - 1)/pThreads>>>(s->d_sum_Pz, &(s->d_moments[3]), (s->particleBlocks.x + pThreads - 1)/pThreads);
CUDA_ERROR(cudaMemcpy(s->moments, s->d_moments, sizeof(float)*4, cudaMemcpyDeviceToHost));
// printf("%f %f %f %f\n", s->moments[0], s->moments[1], s->moments[2], s->moments[3]);
s->KE = s->moments[0] * 0.5f * s->m;
s->Px = s->moments[1] * s->m;
s->Py = s->moments[2] * s->m;
s->Pz = s->moments[3] * s->m;
// printf("%f %f %f %f\n", s->KE, s->Px, s->Py, s->Pz);
}
void dump_position_data(Species *s, char* name){
// printf("Copying particles from GPU to device\n");
CUDA_ERROR(cudaMemcpy(s->particles, s->d_particles, sizeof(Particle)*s->N_particles, cudaMemcpyDeviceToHost));
// printf("Copied particles from GPU to device\n");
FILE *initial_position_data = fopen(name, "w");
for (int i =0; i<s->N_particles; i += 51)
{
Particle *p = &(s->particles[i]);
fprintf(initial_position_data, "%f %f %f %f %f %f\n", p->x, p->y, p->z, p->vx, p->vy, p->vz);
}
// free(s->particles);
fclose(initial_position_data);
}
void particle_cleanup(Species *s)
{
CUDA_ERROR(cudaFree(s->d_particles));
CUDA_ERROR(cudaFree(s->d_block_v2s));
CUDA_ERROR(cudaFree(s->d_block_Px));
CUDA_ERROR(cudaFree(s->d_block_Py));
CUDA_ERROR(cudaFree(s->d_block_Pz));
}
|
db8a396a0ecf0e410f2b96bc50de3a71fdaf764d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define LIST_SIZE_GLOBAL 5000000
#define LIST_SIZE 10000
extern "C" __device__ unsigned long long load_store_index[LIST_SIZE];
extern "C" __device__ unsigned long long load_store_value[LIST_SIZE];
extern "C" __device__ double load_store_double[LIST_SIZE];
extern "C" __device__ unsigned long long load_store_double_index[LIST_SIZE];
extern "C" __device__ unsigned long long record_flag;
extern "C" __device__ unsigned long long call_count;
int memPro_kernel = 0;
void bambooLogRecordOff(){
long long local_record = 0;
hipMemcpyToSymbol(record_flag, &local_record, sizeof(long long), 0, hipMemcpyHostToDevice);
}
void bambooLogKernelBegin(long long i) {
hipMemcpyToSymbol(call_count, &i, sizeof(long long), 0, hipMemcpyHostToDevice);
i = 1;
hipMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, hipMemcpyHostToDevice);
}
void bambooLogKernelEnd()
{
unsigned long long loadStoreIndex[LIST_SIZE] = {0};
unsigned long long loadStoreValue[LIST_SIZE] = {0};
unsigned long long loadStoreIndex_double[LIST_SIZE] = {0};
double loadStoreValue_double[LIST_SIZE] = {0};
FILE *profileFile = fopen("profile_mem_val_result.txt", "a");
for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE)
{
hipMemcpyFromSymbol(&loadStoreIndex, load_store_index, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&loadStoreValue, load_store_value, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), hipMemcpyDeviceToHost);
for(long long i=0; i < LIST_SIZE && loadStoreIndex[i] != 0; i++)
{
fprintf(profileFile, "%lld %lld\n", loadStoreIndex[i], loadStoreValue[i]);
}
}
for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE)
{
hipMemcpyFromSymbol(&loadStoreIndex_double, load_store_double_index, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&loadStoreValue_double, load_store_double, LIST_SIZE * sizeof(double), j*sizeof(double), hipMemcpyDeviceToHost);
for(long long i=0; i < LIST_SIZE && loadStoreIndex_double[i] != 0; i++)
{
fprintf(profileFile, "%lld %.40f\n", loadStoreIndex_double[i], loadStoreValue_double[i]);
}
}
fclose(profileFile);
}
|
db8a396a0ecf0e410f2b96bc50de3a71fdaf764d.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#define LIST_SIZE_GLOBAL 5000000
#define LIST_SIZE 10000
extern "C" __device__ unsigned long long load_store_index[LIST_SIZE];
extern "C" __device__ unsigned long long load_store_value[LIST_SIZE];
extern "C" __device__ double load_store_double[LIST_SIZE];
extern "C" __device__ unsigned long long load_store_double_index[LIST_SIZE];
extern "C" __device__ unsigned long long record_flag;
extern "C" __device__ unsigned long long call_count;
int memPro_kernel = 0;
void bambooLogRecordOff(){
long long local_record = 0;
cudaMemcpyToSymbol(record_flag, &local_record, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
void bambooLogKernelBegin(long long i) {
cudaMemcpyToSymbol(call_count, &i, sizeof(long long), 0, cudaMemcpyHostToDevice);
i = 1;
cudaMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
void bambooLogKernelEnd()
{
unsigned long long loadStoreIndex[LIST_SIZE] = {0};
unsigned long long loadStoreValue[LIST_SIZE] = {0};
unsigned long long loadStoreIndex_double[LIST_SIZE] = {0};
double loadStoreValue_double[LIST_SIZE] = {0};
FILE *profileFile = fopen("profile_mem_val_result.txt", "a");
for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE)
{
cudaMemcpyFromSymbol(&loadStoreIndex, load_store_index, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&loadStoreValue, load_store_value, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
for(long long i=0; i < LIST_SIZE && loadStoreIndex[i] != 0; i++)
{
fprintf(profileFile, "%lld %lld\n", loadStoreIndex[i], loadStoreValue[i]);
}
}
for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE)
{
cudaMemcpyFromSymbol(&loadStoreIndex_double, load_store_double_index, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&loadStoreValue_double, load_store_double, LIST_SIZE * sizeof(double), j*sizeof(double), cudaMemcpyDeviceToHost);
for(long long i=0; i < LIST_SIZE && loadStoreIndex_double[i] != 0; i++)
{
fprintf(profileFile, "%lld %.40f\n", loadStoreIndex_double[i], loadStoreValue_double[i]);
}
}
fclose(profileFile);
}
|
f64713a2075556dcab2200731467177330e235a5.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
*
* @brief Add function GPU implementation
*
* @file addCU.cu
* @author Guillermo Hernández
* @date 16 Mar 2016
*
*/
// System includes
#include <stdlib.h>
#include <stdio.h>
// CUDA runtime
#include <hip/hip_runtime.h>
/**
* @brief Macro to check for CUDA errors
*
* If code!=hipSuccess (0) it prints a message in stderr and returns 1.
*
* @param code integer code returned by last CUDA-related function (hipMalloc, hipGetLastError,...)
* @param msg a string describing the error
*/
#define checkError(code,msg) if (code != hipSuccess) {\
fprintf(stderr, msg);\
fprintf(stderr,"(error code %s)\n",hipGetErrorString(err));\
return 1;\
}
/**
* @brief CUDA Kernel to calculate vector addition
*
* Kernel that computes the vector addition of @p A and @p B into @p C, all of them having @p n elements
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
C[i] = A[i] + B[i];
}
}
extern "C"
int AddGPU(const float *h_A, const float *h_B,float *h_C, int n)
{
// GPU implementation must wrap the call to the kernel
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
size_t size = n * sizeof(float);
// Allocate the device input vectors
float *d_A = NULL;
err = hipMalloc((void **) &d_A, size);
checkError(err,"Failed to allocate device vector A");
float *d_B = NULL;
err = hipMalloc((void **) &d_B, size);
checkError(err,"Failed to allocate device vector B");
float *d_C = NULL;
err = hipMalloc((void **) &d_C, size);
checkError(err,"Failed to allocate device vector C");
// Copy input to device memory
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
checkError(err,"Failed to copy vector A from host to device");
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
checkError(err,"Failed to copy vector B from host to device");
// Launch the kernel
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid,
threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, n);
err = hipGetLastError();
checkError(err,"Failed to launch vectorAdd kernel");
// Copy the device result vector in device memory to the host result vector
// in host memory.
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
checkError(err,"Failed to copy vector C from device to host");
// Free device global memory
err = hipFree(d_A);
checkError(err,"Failed to free device vector A");
err = hipFree(d_B);
checkError(err,"Failed to free device vector B");
err = hipFree(d_C);
checkError(err,"Failed to free device vector C");
return 0;
}
|
f64713a2075556dcab2200731467177330e235a5.cu
|
/**
*
* @brief Add function GPU implementation
*
* @file addCU.cu
* @author Guillermo Hernández
* @date 16 Mar 2016
*
*/
// System includes
#include <stdlib.h>
#include <stdio.h>
// CUDA runtime
#include <cuda_runtime.h>
/**
* @brief Macro to check for CUDA errors
*
* If code!=cudaSuccess (0) it prints a message in stderr and returns 1.
*
* @param code integer code returned by last CUDA-related function (cudaMalloc, cudaGetLastError,...)
* @param msg a string describing the error
*/
#define checkError(code,msg) if (code != cudaSuccess) {\
fprintf(stderr, msg);\
fprintf(stderr,"(error code %s)\n",cudaGetErrorString(err));\
return 1;\
}
/**
* @brief CUDA Kernel to calculate vector addition
*
* Kernel that computes the vector addition of @p A and @p B into @p C, all of them having @p n elements
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
C[i] = A[i] + B[i];
}
}
extern "C"
int AddGPU(const float *h_A, const float *h_B,float *h_C, int n)
{
// GPU implementation must wrap the call to the kernel
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
size_t size = n * sizeof(float);
// Allocate the device input vectors
float *d_A = NULL;
err = cudaMalloc((void **) &d_A, size);
checkError(err,"Failed to allocate device vector A");
float *d_B = NULL;
err = cudaMalloc((void **) &d_B, size);
checkError(err,"Failed to allocate device vector B");
float *d_C = NULL;
err = cudaMalloc((void **) &d_C, size);
checkError(err,"Failed to allocate device vector C");
// Copy input to device memory
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
checkError(err,"Failed to copy vector A from host to device");
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
checkError(err,"Failed to copy vector B from host to device");
// Launch the kernel
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid,
threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, n);
err = cudaGetLastError();
checkError(err,"Failed to launch vectorAdd kernel");
// Copy the device result vector in device memory to the host result vector
// in host memory.
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
checkError(err,"Failed to copy vector C from device to host");
// Free device global memory
err = cudaFree(d_A);
checkError(err,"Failed to free device vector A");
err = cudaFree(d_B);
checkError(err,"Failed to free device vector B");
err = cudaFree(d_C);
checkError(err,"Failed to free device vector C");
return 0;
}
|
615c1e83db9731a54286829849e8d1cbe256ca3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include <assert.h>
#include <hip/hip_fp16.h>
#include "common_cuda_helper.hpp"
#include "modulated_deform_conv/modulated_deform_conv_cuda.cuh"
#include "trt_modulated_deform_conv_kernel.hpp"
#include "trt_plugin_helper.hpp"
template <typename T>
void trt_modulated_deformable_im2col(const T* data_im_, const T* data_offset_, const T* data_mask_,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, T* data_col_,
hipStream_t stream) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel<T>)
, dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
cudaCheckError();
}
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t* output, const scalar_t* bias, size_t step_batch,
size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) { output[index] += bias[(index % step_batch) / step_channel]; }
}
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __hadd(o, b);
}
}
#else
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __float2half(__half2float(o) + __half2float(b));
}
}
#endif
template <typename scalar_t>
static void output_add_bias(scalar_t* output, const scalar_t* bias, size_t batch, size_t channel,
size_t height, size_t width, hipStream_t stream) {
size_t step_channel = height * width;
size_t step_batch = step_channel * channel;
size_t n = step_batch * batch;
hipLaunchKernelGGL(( output_add_bias_kernel), dim3(GET_BLOCKS(n)), dim3(THREADS_PER_BLOCK), 0, stream, output, bias, step_batch,
step_channel, n);
}
template <typename scalar_t>
void ModulatedDeformConvForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* weight, const scalar_t* bias, const scalar_t* offset,
const scalar_t* mask, scalar_t* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
hipblasHandle_t cublas_handle, hipStream_t stream) {
bool with_bias = (bias != nullptr);
im2col_step = ::min(int(batch), im2col_step);
assert(batch % im2col_step == 0);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
scalar_t* columns = (scalar_t*)workspace;
const size_t input_step = channels * height * width;
const size_t offset_step = deformable_group * kernel_h * kernel_w * 2 * height_out * width_out;
const size_t mask_step = deformable_group * kernel_h * kernel_w * height_out * width_out;
const size_t out_step = channels_out * height_out * width_out;
const size_t out_group_step = out_step / group;
const size_t col_g_step = channels * kernel_w * kernel_h / group * height_out * width_out;
const size_t weight_g_step = channels_out / group * channels / group * kernel_h * kernel_w;
const int m = channels_out / group;
const int n = height_out * width_out;
const int k = channels / group * kernel_h * kernel_w;
scalar_t alpha = 1.;
scalar_t beta = 0.;
for (int b = 0; b < batch; b++) {
const scalar_t* input_start = input + b * input_step;
const scalar_t* offset_start = offset + b * offset_step;
const scalar_t* mask_start = mask + b * mask_step;
trt_modulated_deformable_im2col<scalar_t>(
input_start, offset_start, mask_start, 1, channels, height, width, height_out, width_out,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group, columns, stream);
for (int g = 0; g < group; g++) {
const scalar_t* weight_start = weight + g * weight_g_step;
scalar_t* col_start = columns + g * col_g_step;
scalar_t* out_buffer_start = output + b * out_step + g * out_group_step;
cublasGemmWrap<scalar_t>(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, col_start,
n, weight_start, k, &beta, out_buffer_start, n);
cudaCheckError();
}
}
if (with_bias) {
output_add_bias<scalar_t>(output, bias, batch, channels_out, height_out, width_out, stream);
}
}
template void ModulatedDeformConvForwardCUDAKernelLauncher<float>(
const float* input, const float* weight, const float* bias, const float* offset,
const float* mask, float* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
hipblasHandle_t cublas_handle, hipStream_t stream);
template void ModulatedDeformConvForwardCUDAKernelLauncher<__half>(
const __half* input, const __half* weight, const __half* bias, const __half* offset,
const __half* mask, __half* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
hipblasHandle_t cublas_handle, hipStream_t stream);
|
615c1e83db9731a54286829849e8d1cbe256ca3e.cu
|
// Copyright (c) OpenMMLab. All rights reserved
#include <assert.h>
#include <cuda_fp16.h>
#include "common_cuda_helper.hpp"
#include "modulated_deform_conv/modulated_deform_conv_cuda.cuh"
#include "trt_modulated_deform_conv_kernel.hpp"
#include "trt_plugin_helper.hpp"
template <typename T>
void trt_modulated_deformable_im2col(const T* data_im_, const T* data_offset_, const T* data_mask_,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, T* data_col_,
cudaStream_t stream) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
modulated_deformable_im2col_gpu_kernel<T>
<<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
cudaCheckError();
}
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t* output, const scalar_t* bias, size_t step_batch,
size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) { output[index] += bias[(index % step_batch) / step_channel]; }
}
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __hadd(o, b);
}
}
#else
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __float2half(__half2float(o) + __half2float(b));
}
}
#endif
template <typename scalar_t>
static void output_add_bias(scalar_t* output, const scalar_t* bias, size_t batch, size_t channel,
size_t height, size_t width, cudaStream_t stream) {
size_t step_channel = height * width;
size_t step_batch = step_channel * channel;
size_t n = step_batch * batch;
output_add_bias_kernel<<<GET_BLOCKS(n), THREADS_PER_BLOCK, 0, stream>>>(output, bias, step_batch,
step_channel, n);
}
template <typename scalar_t>
void ModulatedDeformConvForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* weight, const scalar_t* bias, const scalar_t* offset,
const scalar_t* mask, scalar_t* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
cublasHandle_t cublas_handle, cudaStream_t stream) {
bool with_bias = (bias != nullptr);
im2col_step = std::min(int(batch), im2col_step);
assert(batch % im2col_step == 0);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
scalar_t* columns = (scalar_t*)workspace;
const size_t input_step = channels * height * width;
const size_t offset_step = deformable_group * kernel_h * kernel_w * 2 * height_out * width_out;
const size_t mask_step = deformable_group * kernel_h * kernel_w * height_out * width_out;
const size_t out_step = channels_out * height_out * width_out;
const size_t out_group_step = out_step / group;
const size_t col_g_step = channels * kernel_w * kernel_h / group * height_out * width_out;
const size_t weight_g_step = channels_out / group * channels / group * kernel_h * kernel_w;
const int m = channels_out / group;
const int n = height_out * width_out;
const int k = channels / group * kernel_h * kernel_w;
scalar_t alpha = 1.;
scalar_t beta = 0.;
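// Per sample: im2col expands the deformable sampling into a (k x n) column matrix, then each
// group computes out(m x n) = weight(m x k) * columns(k x n). cublasGemmWrap presumably forwards
// to column-major cuBLAS GEMM, which is why the dimensions are passed as (n, m, k) below.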
for (int b = 0; b < batch; b++) {
const scalar_t* input_start = input + b * input_step;
const scalar_t* offset_start = offset + b * offset_step;
const scalar_t* mask_start = mask + b * mask_step;
trt_modulated_deformable_im2col<scalar_t>(
input_start, offset_start, mask_start, 1, channels, height, width, height_out, width_out,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group, columns, stream);
for (int g = 0; g < group; g++) {
const scalar_t* weight_start = weight + g * weight_g_step;
scalar_t* col_start = columns + g * col_g_step;
scalar_t* out_buffer_start = output + b * out_step + g * out_group_step;
cublasGemmWrap<scalar_t>(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, col_start,
n, weight_start, k, &beta, out_buffer_start, n);
cudaCheckError();
}
}
if (with_bias) {
output_add_bias<scalar_t>(output, bias, batch, channels_out, height_out, width_out, stream);
}
}
template void ModulatedDeformConvForwardCUDAKernelLauncher<float>(
const float* input, const float* weight, const float* bias, const float* offset,
const float* mask, float* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
cublasHandle_t cublas_handle, cudaStream_t stream);
template void ModulatedDeformConvForwardCUDAKernelLauncher<__half>(
const __half* input, const __half* weight, const __half* bias, const __half* offset,
const __half* mask, __half* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
cublasHandle_t cublas_handle, cudaStream_t stream);
|
1b4324ea5869c809ca6479142f5af0ada530ca6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
using namespace std;
/* a sum reduction on the array of floats 'in'.
* The reduction result is written to the
* address 'result'. The number of elements to
* be reduced is given by 'size'
*
* The example contains data races because barrier
* synchronisation statements, of the form:
* __syncthreads();
* are missing.
*
* Can you add them to eliminate all data races?
*/
#define N 4 /* Same as blockDim */
__global__ void reduce(int *in) {
int tid = threadIdx.x;
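/* Tree reduction: with N == 4, the d == 2 pass has threads 0 and 1 fold in[2] and in[3]
into in[0] and in[1]; after the barrier, the d == 1 pass folds in[1] into in[0],
so the final sum is left in in[0]. */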
for(int d = N/2; d > 0; d >>= 1) {
if(tid < d) {
in[tid] += in[tid + d];
}
/* every thread must reach the barrier, so it sits outside the divergent branch */
__syncthreads();
}
}
/* Master thread writes out result */
}
|
1b4324ea5869c809ca6479142f5af0ada530ca6c.cu
|
#include<stdio.h>
#include<iostream>
using namespace std;
/* a sum reduction on the array of floats 'in'.
* The reduction result is written to the
* address 'result'. The number of elements to
* be reduced is given by 'size'
*
* The example contains data races because barrier
* synchronisation statements, of the form:
* __syncthreads();
* are missing.
*
* Can you add them to eliminate all data races?
*/
#define N 4 /* Same as blockDim */
__global__ void reduce(int *in) {
int tid = threadIdx.x;
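/* Tree reduction: with N == 4, the d == 2 pass has threads 0 and 1 fold in[2] and in[3]
into in[0] and in[1]; after the barrier, the d == 1 pass folds in[1] into in[0],
so the final sum is left in in[0]. */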
for(int d = N/2; d > 0; d >>= 1) {
if(tid < d) {
in[tid] += in[tid + d];
}
/* every thread must reach the barrier, so it sits outside the divergent branch */
__syncthreads();
}
}
/* Master thread writes out result */
}
|
e68aaaf55840d8e37ad126bf2301927e274ee24c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include <THH/THHTensorMathReduce.cuh>
#include <math.h>
#include "ATen/native/Distance.h"
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * ::pow(std::abs(diff), p - 1) * grad / ::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += ::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return ::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * ::pow(std::abs(diff), p - 2) * grad / ::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p) {
const int k = blockIdx.x;
const int stride = blockDim.x;
float n2 = n - .5;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<scalar_t>(n2 * n2 - 2 * k - 1)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
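// For reference: k enumerates the strict upper-triangular pairs (i < j) row by row, i.e.
// k = i * n - i * (i + 1) / 2 + (j - i - 1), so i comes from solving that quadratic in i
// (hence the sqrt above) and j then follows directly from k.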
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
// Reduce warps
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
// Reduce block
// This shared memory is significantly larger than necessary, but the
// assumption is that it's not a bottleneck, and this is simple
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
// Only reduce threads with nonzero data
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
float n2 = n - .5;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<scalar_t>(n2 * n2 - 2 * k - 1)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda", [&] {
if (p == 0.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else if (p == 1.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
}
});
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
const int block_y = 4;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
Tensor buffer = result.type().tensor({n - 1, result.size(0), result.size(1)});
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else if (p < 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
}
});
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
}} // at::native
|
e68aaaf55840d8e37ad126bf2301927e274ee24c.cu
|
#include "ATen/ATen.h"
#include <THC/THCTensorMathReduce.cuh>
#include <math.h>
#include "ATen/native/Distance.h"
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * std::pow(std::abs(diff), p - 1) * grad / std::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += std::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return std::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * std::pow(std::abs(diff), p - 2) * grad / std::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p) {
const int k = blockIdx.x;
const int stride = blockDim.x;
float n2 = n - .5;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<scalar_t>(n2 * n2 - 2 * k - 1)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
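// For reference: k enumerates the strict upper-triangular pairs (i < j) row by row, i.e.
// k = i * n - i * (i + 1) / 2 + (j - i - 1), so i comes from solving that quadratic in i
// (hence the sqrt above) and j then follows directly from k.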
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
// Reduce warps
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
// Reduce block
// This shared memory is significantly larger than necessary, but the
// assumption is that it's not a bottleneck, and this is simple
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
// Only reduce threads with nonzero data
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
float n2 = n - .5;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<scalar_t>(n2 * n2 - 2 * k - 1)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda", [&] {
if (p == 0.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else if (p == 1.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else if (p == 2.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else if (std::isinf(p)) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
} else {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p);
}
});
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
const int block_y = 4;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
Tensor buffer = result.type().tensor({n - 1, result.size(0), result.size(1)});
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else if (p < 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else if (p == 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else if (std::isinf(p)) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
} else {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p);
}
});
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
}} // at::native
|
356b40f29212a2484f6756be91da392c2d63154d.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native { namespace {
enum class OpType {GE, GT, LE, LT};
template<typename scalar_t>
struct CompareFunctor{
CompareFunctor(OpType op): op_(op) {};
OpType op_;
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
if (op_ == OpType::GE) {
return a >= b;
} else if (op_ == OpType::GT) {
return a > b;
} else if (op_ == OpType::LE) {
return a <= b;
} else { //LT
return a < b;
}
}
};
}
void ge_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "ge_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::GE));
});
}
void gt_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::GT));
});
}
void le_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "le_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::LE));
});
}
void lt_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::LT));
});
}
REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda);
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
REGISTER_DISPATCH(le_stub, &le_kernel_cuda);
REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda);
}} // namespace at::native
|
356b40f29212a2484f6756be91da392c2d63154d.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native { namespace {
enum class OpType {GE, GT, LE, LT};
template<typename scalar_t>
struct CompareFunctor{
CompareFunctor(OpType op): op_(op) {};
OpType op_;
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
if (op_ == OpType::GE) {
return a >= b;
} else if (op_ == OpType::GT) {
return a > b;
} else if (op_ == OpType::LE) {
return a <= b;
} else { //LT
return a < b;
}
}
};
}
void ge_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "ge_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::GE));
});
}
void gt_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::GT));
});
}
void le_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "le_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::LE));
});
}
void lt_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareFunctor<scalar_t>(OpType::LT));
});
}
REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda);
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
REGISTER_DISPATCH(le_stub, &le_kernel_cuda);
REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda);
}} // namespace at::native
|
da6e255cf725571737eea79f9cd263031a307b59.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
int main(int argc, char** argv)
{
hipFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
float initPR = 0.15;
float acc = 0.01;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.delta[i] = initPR;
graph.value[i] = 0;
}
//graph.value[arguments.sourceNode] = 0;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_delta, graph.delta, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
subgen.generate(graph, subgraph, acc);
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
hipDeviceSynchronize();
gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), hipMemcpyHostToDevice));
hipDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
//mixLabels<<<partitioner.partitionNodeSize[i]/512 + 1 , 512>>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
itr++;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( pr_async), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
graph.d_delta,
d_finished,
acc);
hipDeviceSynchronize();
gpuErrorcheck( hipPeekAtLastError() );
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph, acc);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), hipMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
da6e255cf725571737eea79f9cd263031a307b59.cu
|
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
int main(int argc, char** argv)
{
cudaFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
float initPR = 0.15;
float acc = 0.01;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.delta[i] = initPR;
graph.value[i] = 0;
}
//graph.value[arguments.sourceNode] = 0;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_delta, graph.delta, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
subgen.generate(graph, subgraph, acc);
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
cudaDeviceSynchronize();
gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
//mixLabels<<<partitioner.partitionNodeSize[i]/512 + 1 , 512>>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
itr++;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
pr_async<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
graph.d_delta,
d_finished,
acc);
cudaDeviceSynchronize();
gpuErrorcheck( cudaPeekAtLastError() );
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph, acc);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), cudaMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
4305ba2b20063f12eb8a9851af74856e94cff76a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Christian Noboa Mardini <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/column_utils.hpp>
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <utilities/type_dispatcher.hpp>
#include <utilities/error_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include <utilities/bit_util.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <nvstrings/NVCategory.h>
#include <bitmask/bit_mask.cuh>
#include <copying/slice.hpp>
namespace cudf {
namespace {
using bit_mask_t = bit_mask::bit_mask_t;
/**
* @brief Improve the readability of the source code.
* Parameter for the CUDA kernel.
*/
constexpr std::size_t NO_DYNAMIC_MEMORY = 0;
template <typename ColumnType>
__global__
void slice_data_kernel(ColumnType* output_data,
ColumnType const* input_data,
gdf_index_type const* indices,
gdf_index_type const indices_position) {
gdf_index_type input_offset = indices[indices_position*2]; /**< The start index position of the input data. */
gdf_size_type row_size = indices[indices_position*2 + 1] - input_offset;
// Calculate kernel parameters
gdf_size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
gdf_size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < row_size) {
output_data[row_index] = input_data[input_offset + row_index];
row_index += row_step;
}
}
/** @brief This function copies a slice of a bitmask.
*
* If the slice is from element 10 to element 40, element 10 corresponds to bit 3 of the second byte,
* that bit needs to become bit 0. So we are reading two adjacent blocks and bitshifting them together,
* to then write one block. We also take care that if the last bits of a bit_mask_t block don't
* correspond to this slice, then we apply a mask to clear those bits.
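*
* Example (assuming 32-bit bit_mask_t blocks): for a slice that starts at element 10,
* input_offset is 0 and rotate_input is 10, so each output block is produced by funnel-shifting
* two adjacent input blocks right by 10 bits; output bit 0 then holds input bit 10.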
*/
__global__
void slice_bitmask_kernel(bit_mask_t* output_bitmask,
gdf_size_type* output_null_count,
bit_mask_t const* input_bitmask,
gdf_size_type const input_size,
gdf_index_type const* indices,
gdf_size_type const indices_size,
gdf_index_type const indices_position) {
// Obtain the indices for copying
gdf_index_type input_index_begin = indices[indices_position * 2];
gdf_index_type input_index_end = indices[indices_position * 2 + 1];
gdf_index_type input_offset = cudf::util::detail::bit_container_index<bit_mask_t, gdf_index_type>(input_index_begin);
gdf_index_type rotate_input = cudf::util::detail::intra_container_index<bit_mask_t, gdf_index_type>(input_index_begin);
bit_mask_t mask_last = (bit_mask_t{1} << ((input_index_end - input_index_begin) % bit_mask::bits_per_element)) - bit_mask_t{1};
gdf_size_type input_block_length = bit_mask::num_elements(input_size);
gdf_size_type partition_block_length = bit_mask::num_elements(input_index_end - input_index_begin);
// Calculate kernel parameters
gdf_size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
gdf_size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < partition_block_length) {
// load data into one or two adjacent bitmask blocks
if (rotate_input == 0){
output_bitmask[row_index] = input_bitmask[input_offset + row_index];
} else {
bit_mask_t lower_value = input_bitmask[input_offset + row_index];
bit_mask_t upper_value = bit_mask_t{0};
if (row_index < (input_block_length - 1)) {
upper_value = input_bitmask[input_offset + row_index + 1];
}
// Perform rotation
output_bitmask[row_index] = __funnelshift_rc(lower_value, upper_value, rotate_input);
}
// Apply mask for the last value in the bitmask
if ((row_index == (partition_block_length - 1)) && mask_last) {
output_bitmask[row_index] &= mask_last;
}
// Perform null bitmask null count
std::uint32_t null_count_value = __popc(output_bitmask[row_index]); // Count the number of bits that are set to 1 in a 32 bit integer.
atomicAdd(output_null_count, null_count_value);
row_index += row_step;
}
}
class Slice {
public:
Slice(gdf_column const & input_column,
gdf_index_type const* indices,
gdf_size_type num_indices,
std::vector<gdf_column*> const & output_columns,
std::vector<hipStream_t> const & streams)
: input_column_(input_column), indices_(indices), num_indices_(num_indices),
output_columns_(output_columns), streams_(streams) { }
public:
template <typename ColumnType>
void operator()() {
gdf_size_type columns_quantity = output_columns_.size();
// Perform operation
for (gdf_index_type index = 0; index < columns_quantity; ++index) {
// Empty output column
if (output_columns_[index]->size == 0) {
continue;
}
// Create a new cuda variable for null count in the bitmask
rmm::device_vector<gdf_size_type> bit_set_counter(1, 0);
// Gather stream
hipStream_t stream = get_stream(index);
// Allocate Column
gdf_column* output_column = output_columns_[index];
auto col_width { cudf::byte_width(*output_column) };
RMM_TRY( RMM_ALLOC(&(output_column->data), col_width * output_column->size, stream) );
if(input_column_.valid != nullptr){
RMM_TRY( RMM_ALLOC(&(output_column->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(output_column->size), stream) );
} else {
output_column->valid = nullptr;
}
// Configure grid for data kernel launch
auto data_grid_config = cudf::util::cuda::grid_config_1d(output_column->size, 256);
// Make a copy of the data in the gdf_column
hipLaunchKernelGGL(( slice_data_kernel<ColumnType>)
,
dim3(data_grid_config.num_blocks),
dim3(data_grid_config.num_threads_per_block),
NO_DYNAMIC_MEMORY,
stream
,
static_cast<ColumnType*>(output_column->data),
static_cast<ColumnType const*>(input_column_.data),
indices_,
index
);
if(input_column_.valid != nullptr){
// Configure grid for bit mask kernel launch
auto valid_grid_config = cudf::util::cuda::grid_config_1d(gdf_num_bitmask_elements(output_column->size), 256);
// Make a copy of the bitmask in the gdf_column
hipLaunchKernelGGL(( slice_bitmask_kernel)
,
dim3(valid_grid_config.num_blocks),
dim3(valid_grid_config.num_threads_per_block),
NO_DYNAMIC_MEMORY,
stream
,
reinterpret_cast<bit_mask_t*>(output_column->valid),
bit_set_counter.data().get(),
reinterpret_cast<bit_mask_t const*>(input_column_.valid),
input_column_.size,
indices_,
num_indices_,
index
);
CHECK_STREAM(stream);
// Update the other fields in the output column
gdf_size_type num_nulls;
CUDA_TRY(hipMemcpyAsync(&num_nulls, bit_set_counter.data().get(), sizeof(gdf_size_type),
hipMemcpyDeviceToHost, stream));
output_column->null_count = output_column->size - num_nulls;
} else {
output_column->null_count = 0;
}
if (output_column->dtype == GDF_STRING_CATEGORY){
NVCategory* new_category = static_cast<NVCategory*>(input_column_.dtype_info.category)->gather_and_remap(
static_cast<int*>(output_column->data), (unsigned int)output_column->size);
output_column->dtype_info.category = new_category;
}
}
}
private:
hipStream_t get_stream(gdf_index_type index) {
if (streams_.size() == 0) {
return hipStream_t{nullptr};
}
return streams_[index % streams_.size()];
}
gdf_column const input_column_;
gdf_index_type const* indices_;
gdf_size_type num_indices_;
std::vector<gdf_column*> const output_columns_;
std::vector<hipStream_t> streams_;
};
} // namespace
namespace detail {
std::vector<gdf_column*> slice(gdf_column const & input_column,
gdf_index_type const* indices,
gdf_size_type num_indices,
std::vector<hipStream_t> const & streams) {
std::vector<gdf_column*> output_columns;
if (num_indices == 0 || indices == nullptr) {
return output_columns;
}
if (input_column.size == 0) {
return output_columns;
}
CUDF_EXPECTS(input_column.data != nullptr, "input column data is null");
CUDF_EXPECTS((num_indices % 2) == 0, "indices size must be even");
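// indices is a device array of flattened [begin, end) pairs, one pair per requested slice,
// which is why an even number of entries is required.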
// Get indexes on host side
std::vector<gdf_size_type> host_indices(num_indices);
CUDA_TRY( hipMemcpy(host_indices.data(), indices, num_indices * sizeof(gdf_size_type), hipMemcpyDeviceToHost) );
// Initialize output_columns
output_columns.resize(num_indices/2);
for (gdf_size_type i = 0; i < num_indices/2; i++){
output_columns[i] = new gdf_column{};
gdf_column_view_augmented(output_columns[i],
nullptr,
nullptr,
host_indices[2*i + 1] - host_indices[2*i],
input_column.dtype,
0,
{input_column.dtype_info.time_unit, nullptr});
}
// Create slice helper class
Slice slice(input_column, indices, num_indices, output_columns, streams);
// Perform cudf operation
cudf::type_dispatcher(input_column.dtype, slice);
return output_columns;
}
} // namespace detail
std::vector<gdf_column*> slice(gdf_column const & input_column,
gdf_index_type const* indices,
gdf_size_type num_indices) {
return cudf::detail::slice(input_column, indices, num_indices);
}
} // namespace cudf
|
4305ba2b20063f12eb8a9851af74856e94cff76a.cu
|
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Christian Noboa Mardini <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/column_utils.hpp>
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <utilities/type_dispatcher.hpp>
#include <utilities/error_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include <utilities/bit_util.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <nvstrings/NVCategory.h>
#include <bitmask/bit_mask.cuh>
#include <copying/slice.hpp>
namespace cudf {
namespace {
using bit_mask_t = bit_mask::bit_mask_t;
/**
* @brief Improve the readability of the source code.
* Parameter for the CUDA kernel.
*/
constexpr std::size_t NO_DYNAMIC_MEMORY = 0;
template <typename ColumnType>
__global__
void slice_data_kernel(ColumnType* output_data,
ColumnType const* input_data,
gdf_index_type const* indices,
gdf_index_type const indices_position) {
gdf_index_type input_offset = indices[indices_position*2]; /**< The start index position of the input data. */
gdf_size_type row_size = indices[indices_position*2 + 1] - input_offset;
// Calculate kernel parameters
gdf_size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
gdf_size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < row_size) {
output_data[row_index] = input_data[input_offset + row_index];
row_index += row_step;
}
}
/** @brief This function copies a slice of a bitmask.
*
* If the slice is from element 10 to element 40, element 10 corresponds to bit 3 of the second byte,
* that bit needs to become bit 0. So we are reading two adjacent blocks and bitshifting them together,
* to then write one block. We also take care that if the last bits of a bit_mask_t block don't
* correspond to this slice, then we apply a mask to clear those bits.
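*
* Example (assuming 32-bit bit_mask_t blocks): for a slice that starts at element 10,
* input_offset is 0 and rotate_input is 10, so each output block is produced by funnel-shifting
* two adjacent input blocks right by 10 bits; output bit 0 then holds input bit 10.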
*/
__global__
void slice_bitmask_kernel(bit_mask_t* output_bitmask,
gdf_size_type* output_null_count,
bit_mask_t const* input_bitmask,
gdf_size_type const input_size,
gdf_index_type const* indices,
gdf_size_type const indices_size,
gdf_index_type const indices_position) {
// Obtain the indices for copying
gdf_index_type input_index_begin = indices[indices_position * 2];
gdf_index_type input_index_end = indices[indices_position * 2 + 1];
gdf_index_type input_offset = cudf::util::detail::bit_container_index<bit_mask_t, gdf_index_type>(input_index_begin);
gdf_index_type rotate_input = cudf::util::detail::intra_container_index<bit_mask_t, gdf_index_type>(input_index_begin);
bit_mask_t mask_last = (bit_mask_t{1} << ((input_index_end - input_index_begin) % bit_mask::bits_per_element)) - bit_mask_t{1};
gdf_size_type input_block_length = bit_mask::num_elements(input_size);
gdf_size_type partition_block_length = bit_mask::num_elements(input_index_end - input_index_begin);
// Calculate kernel parameters
gdf_size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
gdf_size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < partition_block_length) {
// load data into one or two adjacent bitmask blocks
if (rotate_input == 0){
output_bitmask[row_index] = input_bitmask[input_offset + row_index];
} else {
bit_mask_t lower_value = input_bitmask[input_offset + row_index];
bit_mask_t upper_value = bit_mask_t{0};
if (row_index < (input_block_length - 1)) {
upper_value = input_bitmask[input_offset + row_index + 1];
}
// Perform rotation
output_bitmask[row_index] = __funnelshift_rc(lower_value, upper_value, rotate_input);
}
// Apply mask for the last value in the bitmask
if ((row_index == (partition_block_length - 1)) && mask_last) {
output_bitmask[row_index] &= mask_last;
}
// Perform null bitmask null count
std::uint32_t null_count_value = __popc(output_bitmask[row_index]); // Count the number of bits that are set to 1 in a 32 bit integer.
atomicAdd(output_null_count, null_count_value);
row_index += row_step;
}
}
class Slice {
public:
Slice(gdf_column const & input_column,
gdf_index_type const* indices,
gdf_size_type num_indices,
std::vector<gdf_column*> const & output_columns,
std::vector<cudaStream_t> const & streams)
: input_column_(input_column), indices_(indices), num_indices_(num_indices),
output_columns_(output_columns), streams_(streams) { }
public:
template <typename ColumnType>
void operator()() {
gdf_size_type columns_quantity = output_columns_.size();
// Perform operation
for (gdf_index_type index = 0; index < columns_quantity; ++index) {
// Empty output column
if (output_columns_[index]->size == 0) {
continue;
}
// Create a new cuda variable for null count in the bitmask
rmm::device_vector<gdf_size_type> bit_set_counter(1, 0);
// Gather stream
cudaStream_t stream = get_stream(index);
// Allocate Column
gdf_column* output_column = output_columns_[index];
auto col_width { cudf::byte_width(*output_column) };
RMM_TRY( RMM_ALLOC(&(output_column->data), col_width * output_column->size, stream) );
if(input_column_.valid != nullptr){
RMM_TRY( RMM_ALLOC(&(output_column->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(output_column->size), stream) );
} else {
output_column->valid = nullptr;
}
// Configure grid for data kernel launch
auto data_grid_config = cudf::util::cuda::grid_config_1d(output_column->size, 256);
// Make a copy of the data in the gdf_column
slice_data_kernel<ColumnType>
<<<
data_grid_config.num_blocks,
data_grid_config.num_threads_per_block,
NO_DYNAMIC_MEMORY,
stream
>>>(
static_cast<ColumnType*>(output_column->data),
static_cast<ColumnType const*>(input_column_.data),
indices_,
index
);
if(input_column_.valid != nullptr){
// Configure grid for bit mask kernel launch
auto valid_grid_config = cudf::util::cuda::grid_config_1d(gdf_num_bitmask_elements(output_column->size), 256);
// Make a copy of the bitmask in the gdf_column
slice_bitmask_kernel
<<<
valid_grid_config.num_blocks,
valid_grid_config.num_threads_per_block,
NO_DYNAMIC_MEMORY,
stream
>>>(
reinterpret_cast<bit_mask_t*>(output_column->valid),
bit_set_counter.data().get(),
reinterpret_cast<bit_mask_t const*>(input_column_.valid),
input_column_.size,
indices_,
num_indices_,
index
);
CHECK_STREAM(stream);
// Update the other fields in the output column
gdf_size_type num_nulls;
CUDA_TRY(cudaMemcpyAsync(&num_nulls, bit_set_counter.data().get(), sizeof(gdf_size_type),
cudaMemcpyDeviceToHost, stream));
output_column->null_count = output_column->size - num_nulls;
} else {
output_column->null_count = 0;
}
if (output_column->dtype == GDF_STRING_CATEGORY){
NVCategory* new_category = static_cast<NVCategory*>(input_column_.dtype_info.category)->gather_and_remap(
static_cast<int*>(output_column->data), (unsigned int)output_column->size);
output_column->dtype_info.category = new_category;
}
}
}
private:
cudaStream_t get_stream(gdf_index_type index) {
if (streams_.size() == 0) {
return cudaStream_t{nullptr};
}
return streams_[index % streams_.size()];
}
gdf_column const input_column_;
gdf_index_type const* indices_;
gdf_size_type num_indices_;
std::vector<gdf_column*> const output_columns_;
std::vector<cudaStream_t> streams_;
};
} // namespace
namespace detail {
std::vector<gdf_column*> slice(gdf_column const & input_column,
gdf_index_type const* indices,
gdf_size_type num_indices,
std::vector<cudaStream_t> const & streams) {
std::vector<gdf_column*> output_columns;
if (num_indices == 0 || indices == nullptr) {
return output_columns;
}
if (input_column.size == 0) {
return output_columns;
}
CUDF_EXPECTS(input_column.data != nullptr, "input column data is null");
CUDF_EXPECTS((num_indices % 2) == 0, "indices size must be even");
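// indices is a device array of flattened [begin, end) pairs, one pair per requested slice,
// which is why an even number of entries is required.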
// Get indexes on host side
std::vector<gdf_size_type> host_indices(num_indices);
CUDA_TRY( cudaMemcpy(host_indices.data(), indices, num_indices * sizeof(gdf_size_type), cudaMemcpyDeviceToHost) );
// Initialize output_columns
output_columns.resize(num_indices/2);
for (gdf_size_type i = 0; i < num_indices/2; i++){
output_columns[i] = new gdf_column{};
gdf_column_view_augmented(output_columns[i],
nullptr,
nullptr,
host_indices[2*i + 1] - host_indices[2*i],
input_column.dtype,
0,
{input_column.dtype_info.time_unit, nullptr});
}
// Create slice helper class
Slice slice(input_column, indices, num_indices, output_columns, streams);
// Perform cudf operation
cudf::type_dispatcher(input_column.dtype, slice);
return output_columns;
}
} // namespace detail
std::vector<gdf_column*> slice(gdf_column const & input_column,
gdf_index_type const* indices,
gdf_size_type num_indices) {
return cudf::detail::slice(input_column, indices, num_indices);
}
} // namespace cudf
|
bb752459593bee0409784471f01f3323ea8d2b51.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace math {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T, int BlockSize>
__global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales,
const size_t seq_width) {
for (int i = threadIdx.x;
i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width;
i += BlockSize) {
int idx = lod[blockIdx.x] * seq_width + i;
seq[idx] *= scales[blockIdx.x];
}
}
template <typename T>
class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context, const T* scales,
framework::LoDTensor* seq) {
const size_t level = 0;
auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1;
const size_t seq_width = seq->numel() / seq->dims()[0];
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
T* seq_data = seq->mutable_data<T>(context.GetPlace());
hipLaunchKernelGGL(( SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>),
dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(),
seq_data, abs_offset_lod[level].CUDAMutableData(context.GetPlace()),
scales, seq_width);
}
};
template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, float>;
} // namespace math
} // namespace operators
} // namespace paddle
|
bb752459593bee0409784471f01f3323ea8d2b51.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace math {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T, int BlockSize>
__global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales,
const size_t seq_width) {
for (int i = threadIdx.x;
i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width;
i += BlockSize) {
int idx = lod[blockIdx.x] * seq_width + i;
seq[idx] *= scales[blockIdx.x];
}
}
template <typename T>
class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context, const T* scales,
framework::LoDTensor* seq) {
const size_t level = 0;
auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1;
const size_t seq_width = seq->numel() / seq->dims()[0];
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
T* seq_data = seq->mutable_data<T>(context.GetPlace());
SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS><<<
num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>(
seq_data, abs_offset_lod[level].CUDAMutableData(context.GetPlace()),
scales, seq_width);
}
};
template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, float>;
} // namespace math
} // namespace operators
} // namespace paddle
|
8ef228f7ce5e69f639f2843ff95e16b54649923a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void tanh_double(int n,int idx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = tanh(dy[i]);
}
}
|
8ef228f7ce5e69f639f2843ff95e16b54649923a.cu
|
#include "includes.h"
__global__ void tanh_double(int n,int idx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = tanh(dy[i]);
}
}
|
2b8340c63f63077eff0b066b69533d6aa6a7f4b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file CTU_1D_cuda.cu
* \brief Definitions of the cuda CTU algorithm functions. */
#ifdef CUDA
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"CTU_1D_cuda.h"
#include"pcm_cuda.h"
#include"plmp_cuda.h"
#include"plmc_cuda.h"
#include"ppmp_cuda.h"
#include"ppmc_cuda.h"
#include"exact_cuda.h"
#include"roe_cuda.h"
#include"hllc_cuda.h"
#include"cooling_cuda.h"
#include"error_handling.h"
#include"io.h"
Real CTU_Algorithm_1D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int x_off, int n_ghost, Real dx, Real xbound, Real dt,int n_fields)
{
//Here, *host_conserved contains the entire
//set of conserved variables on the grid
//host_conserved0 contains the values at time n
//host_conserved1 will contain the values at time n+1
int n_cells = nx;
int ny = 1;
int nz = 1;
// set the dimensions of the cuda grid
int ngrid = (n_cells + TPB - 1) / TPB;
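  // ceiling division so every cell is covered, e.g. n_cells = 1000 with TPB = 256 gives ngrid = 4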
dim3 dimGrid(ngrid, 1, 1);
dim3 dimBlock(TPB, 1, 1);
// allocate an array on the CPU to hold max_dti returned from each thread block
Real max_dti = 0;
Real *host_dti_array;
host_dti_array = (Real *) malloc(ngrid*sizeof(Real));
#ifdef COOLING_GPU
Real min_dt = 1e10;
Real *host_dt_array;
host_dt_array = (Real *) malloc(ngrid*sizeof(Real));
#endif
// allocate GPU arrays
// conserved variables
Real *dev_conserved;
// initial input states and associated interface fluxes (Q* and F* from Stone, 2008)
Real *Q_L, *Q_R, *F;
// array to hold zero values for H correction (necessary to pass to Roe solver)
Real *etah;
// array of inverse timesteps for dt calculation
Real *dev_dti_array;
#if defined COOLING_GPU
// array of timesteps for dt calculation (cooling restriction)
Real *dev_dt_array;
#endif
// allocate memory on the GPU
CudaSafeCall( hipMalloc((void**)&dev_conserved, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_L, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_R, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&F, (n_fields)*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&etah, n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) );
#if defined COOLING_GPU
CudaSafeCall( hipMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) );
#endif
// copy the conserved variable array onto the GPU
CudaSafeCall( hipMemcpy(dev_conserved, host_conserved0, n_fields*n_cells*sizeof(Real), hipMemcpyHostToDevice) );
CudaCheckError();
// Step 1: Do the reconstruction
#ifdef PCM
hipLaunchKernelGGL(( PCM_Reconstruction_1D), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, Q_L, Q_R, nx, n_ghost, gama, n_fields);
CudaCheckError();
#endif
#ifdef PLMP
hipLaunchKernelGGL(( PLMP_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PLMC
hipLaunchKernelGGL(( PLMC_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMP
hipLaunchKernelGGL(( PPMP_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMC
hipLaunchKernelGGL(( PPMC_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
// Step 2: Calculate the fluxes
#ifdef EXACT
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dimGrid),dim3(dimBlock), 0, 0, Q_L, Q_R, F, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
#ifdef ROE
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dimGrid),dim3(dimBlock), 0, 0, Q_L, Q_R, F, nx, ny, nz, n_ghost, gama, etah, 0, n_fields);
#endif
#ifdef HLLC
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dimGrid),dim3(dimBlock), 0, 0, Q_L, Q_R, F, nx, ny, nz, n_ghost, gama, etah, 0, n_fields);
#endif
CudaCheckError();
// Step 3: Update the conserved variable array
hipLaunchKernelGGL(( Update_Conserved_Variables_1D), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, F, n_cells, x_off, n_ghost, dx, xbound, dt, gama, n_fields);
CudaCheckError();
// Synchronize the total and internal energy, if using dual-energy formalism
#ifdef DE
hipLaunchKernelGGL(( Sync_Energies_1D), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, n_cells, n_ghost, gama, n_fields);
CudaCheckError();
#endif
// Apply cooling
#ifdef COOLING_GPU
hipLaunchKernelGGL(( cooling_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, nx, ny, nz, n_ghost, n_fields, dt, gama, dev_dti_array);
CudaCheckError();
#endif
// Calculate the next timestep
hipLaunchKernelGGL(( Calc_dt_1D), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_conserved, n_cells, n_ghost, dx, dev_dti_array, gama);
CudaCheckError();
// copy the conserved variable array back to the CPU
CudaSafeCall( hipMemcpy(host_conserved1, dev_conserved, n_fields*n_cells*sizeof(Real), hipMemcpyDeviceToHost) );
// copy the dti array onto the CPU
CudaSafeCall( hipMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) );
// iterate through to find the maximum inverse dt for this subgrid block
for (int i=0; i<ngrid; i++) {
max_dti = fmax(max_dti, host_dti_array[i]);
}
#if defined COOLING_GPU
// copy the dt array from cooling onto the CPU
CudaSafeCall( hipMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) );
// find maximum inverse timestep from cooling time
for (int i=0; i<ngrid; i++) {
min_dt = fmin(min_dt, host_dt_array[i]);
}
if (min_dt < C_cfl/max_dti) {
max_dti = C_cfl/min_dt;
}
#endif
// free the CPU memory
free(host_dti_array);
#if defined COOLING_GPU
free(host_dt_array);
#endif
// free the GPU memory
hipFree(dev_conserved);
hipFree(Q_L);
hipFree(Q_R);
hipFree(F);
hipFree(etah);
hipFree(dev_dti_array);
#if defined COOLING_GPU
hipFree(dev_dt_array);
#endif
// return the maximum inverse timestep
return max_dti;
}
#endif //CUDA
|
2b8340c63f63077eff0b066b69533d6aa6a7f4b7.cu
|
/*! \file CTU_1D_cuda.cu
* \brief Definitions of the cuda CTU algorithm functions. */
#ifdef CUDA
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"CTU_1D_cuda.h"
#include"pcm_cuda.h"
#include"plmp_cuda.h"
#include"plmc_cuda.h"
#include"ppmp_cuda.h"
#include"ppmc_cuda.h"
#include"exact_cuda.h"
#include"roe_cuda.h"
#include"hllc_cuda.h"
#include"cooling_cuda.h"
#include"error_handling.h"
#include"io.h"
Real CTU_Algorithm_1D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int x_off, int n_ghost, Real dx, Real xbound, Real dt,int n_fields)
{
//Here, *host_conserved contains the entire
//set of conserved variables on the grid
//host_conserved0 contains the values at time n
//host_conserved1 will contain the values at time n+1
int n_cells = nx;
int ny = 1;
int nz = 1;
// set the dimensions of the cuda grid
int ngrid = (n_cells + TPB - 1) / TPB;
dim3 dimGrid(ngrid, 1, 1);
dim3 dimBlock(TPB, 1, 1);
// allocate an array on the CPU to hold max_dti returned from each thread block
Real max_dti = 0;
Real *host_dti_array;
host_dti_array = (Real *) malloc(ngrid*sizeof(Real));
#ifdef COOLING_GPU
Real min_dt = 1e10;
Real *host_dt_array;
host_dt_array = (Real *) malloc(ngrid*sizeof(Real));
#endif
// allocate GPU arrays
// conserved variables
Real *dev_conserved;
// initial input states and associated interface fluxes (Q* and F* from Stone, 2008)
Real *Q_L, *Q_R, *F;
// array to hold zero values for H correction (necessary to pass to Roe solver)
Real *etah;
// array of inverse timesteps for dt calculation
Real *dev_dti_array;
#if defined COOLING_GPU
// array of timesteps for dt calculation (cooling restriction)
Real *dev_dt_array;
#endif
// allocate memory on the GPU
CudaSafeCall( cudaMalloc((void**)&dev_conserved, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_L, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_R, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&F, (n_fields)*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&etah, n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) );
#if defined COOLING_GPU
CudaSafeCall( cudaMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) );
#endif
// copy the conserved variable array onto the GPU
CudaSafeCall( cudaMemcpy(dev_conserved, host_conserved0, n_fields*n_cells*sizeof(Real), cudaMemcpyHostToDevice) );
CudaCheckError();
// Step 1: Do the reconstruction
#ifdef PCM
PCM_Reconstruction_1D<<<dimGrid,dimBlock>>>(dev_conserved, Q_L, Q_R, nx, n_ghost, gama, n_fields);
CudaCheckError();
#endif
#ifdef PLMP
PLMP_cuda<<<dimGrid,dimBlock>>>(dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PLMC
PLMC_cuda<<<dimGrid,dimBlock>>>(dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMP
PPMP_cuda<<<dimGrid,dimBlock>>>(dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMC
PPMC_cuda<<<dimGrid,dimBlock>>>(dev_conserved, Q_L, Q_R, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
// Step 2: Calculate the fluxes
#ifdef EXACT
Calculate_Exact_Fluxes_CUDA<<<dimGrid,dimBlock>>>(Q_L, Q_R, F, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
#ifdef ROE
Calculate_Roe_Fluxes_CUDA<<<dimGrid,dimBlock>>>(Q_L, Q_R, F, nx, ny, nz, n_ghost, gama, etah, 0, n_fields);
#endif
#ifdef HLLC
Calculate_HLLC_Fluxes_CUDA<<<dimGrid,dimBlock>>>(Q_L, Q_R, F, nx, ny, nz, n_ghost, gama, etah, 0, n_fields);
#endif
CudaCheckError();
// Step 3: Update the conserved variable array
Update_Conserved_Variables_1D<<<dimGrid,dimBlock>>>(dev_conserved, F, n_cells, x_off, n_ghost, dx, xbound, dt, gama, n_fields);
CudaCheckError();
// Synchronize the total and internal energy, if using dual-energy formalism
#ifdef DE
Sync_Energies_1D<<<dimGrid,dimBlock>>>(dev_conserved, n_cells, n_ghost, gama, n_fields);
CudaCheckError();
#endif
// Apply cooling
#ifdef COOLING_GPU
cooling_kernel<<<dimGrid,dimBlock>>>(dev_conserved, nx, ny, nz, n_ghost, n_fields, dt, gama, dev_dti_array);
CudaCheckError();
#endif
// Calculate the next timestep
Calc_dt_1D<<<dimGrid,dimBlock>>>(dev_conserved, n_cells, n_ghost, dx, dev_dti_array, gama);
CudaCheckError();
// copy the conserved variable array back to the CPU
CudaSafeCall( cudaMemcpy(host_conserved1, dev_conserved, n_fields*n_cells*sizeof(Real), cudaMemcpyDeviceToHost) );
// copy the dti array onto the CPU
CudaSafeCall( cudaMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) );
// iterate through to find the maximum inverse dt for this subgrid block
for (int i=0; i<ngrid; i++) {
max_dti = fmax(max_dti, host_dti_array[i]);
}
#if defined COOLING_GPU
// copy the dt array from cooling onto the CPU
CudaSafeCall( cudaMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) );
// find maximum inverse timestep from cooling time
for (int i=0; i<ngrid; i++) {
min_dt = fmin(min_dt, host_dt_array[i]);
}
if (min_dt < C_cfl/max_dti) {
max_dti = C_cfl/min_dt;
}
#endif
// free the CPU memory
free(host_dti_array);
#if defined COOLING_GPU
free(host_dt_array);
#endif
// free the GPU memory
cudaFree(dev_conserved);
cudaFree(Q_L);
cudaFree(Q_R);
cudaFree(F);
cudaFree(etah);
cudaFree(dev_dti_array);
#if defined COOLING_GPU
cudaFree(dev_dt_array);
#endif
// return the maximum inverse timestep
return max_dti;
}
#endif //CUDA
|
24285c8e8db9c0c4ac664867fdcb1789e0ecc8d3.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=1024 --gridDim=1 --boogie-file=${KERNEL_DIR}/axioms.bpl --no-inline
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 8
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
// Static pointers to device functions
__device__ funcType p_mul_func = multiplyByTwo;
__device__ funcType p_div_func = divideByTwo;
__global__ void foo(float *v, funcType f, unsigned int size, int i)
{
__assert(f == divideByTwo);
__assert(i != 0);
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
void *x = (void*)f; /*ptr_to_ptr*/
if (i == 0)
x = NULL;
funcType g = (funcType)x;
if (tid < size)
{
v[tid] = (*g)(v, tid);
}
}
|
24285c8e8db9c0c4ac664867fdcb1789e0ecc8d3.cu
|
//pass
//--blockDim=1024 --gridDim=1 --boogie-file=${KERNEL_DIR}/axioms.bpl --no-inline
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#define N 8
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
// Static pointers to device functions
__device__ funcType p_mul_func = multiplyByTwo;
__device__ funcType p_div_func = divideByTwo;
__global__ void foo(float *v, funcType f, unsigned int size, int i)
{
__assert(f == divideByTwo);
__assert(i != 0);
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
void *x = (void*)f; /*ptr_to_ptr*/
if (i == 0)
x = NULL;
funcType g = (funcType)x;
if (tid < size)
{
v[tid] = (*g)(v, tid);
}
}
|
student.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity HW5
Histogramming for Speed
The goal of this assignment is to compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
const int N_THREADS = 1024;
__global__
void naiveHisto(const unsigned int* const vals, //INPUT
unsigned int* const histo, //OUTPUT
int numVals)
{
int tid = threadIdx.x;
int global_id = tid + blockDim.x*blockIdx.x;
if (global_id >= numVals) return;
atomicAdd(&(histo[vals[global_id]]), 1);
}
__global__
void perBlockHisto(const unsigned int* const vals, //INPUT
unsigned int* const histo, //OUTPUT
int numVals,int numBins) {
extern __shared__ unsigned int sharedHisto[]; // dynamically sized: one counter per bin, same size as the global histogram
//each block zeroes its own shared-memory copy; striding by blockDim.x lets the block's threads cover all numBins bins
for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
sharedHisto[i] = 0;
}
__syncthreads();
int globalid = threadIdx.x + blockIdx.x*blockDim.x;
if (globalid < numVals) { // guard the tail block so out-of-range threads never index vals[]
  atomicAdd(&sharedHisto[vals[globalid]], 1);
}
__syncthreads();
for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
atomicAdd(&histo[i], sharedHisto[i]);
}
}
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems)
{
//TODO Launch the yourHisto kernel
int blocks = (numElems + N_THREADS - 1) / N_THREADS; // round up; ceil() of an already-truncated integer division does nothing
//naiveHisto <<< blocks, N_THREADS >>> (d_vals, d_histo, numElems);
//more than 7x speedup over naiveHisto
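// the launch below requests numBins * sizeof(unsigned int) bytes of dynamic shared memory per
// block, so numBins has to fit within the device's per-block shared-memory limit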
perBlockHisto << <blocks, N_THREADS, sizeof(unsigned int)*numBins >> > (d_vals, d_histo, numElems, numBins);
//if you want to use/launch more than one kernel,
//feel free
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
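/* A minimal host-side usage sketch (illustrative only, exampleComputeHistogramCall is not part
   of the handout): it assumes d_vals and d_histo are device buffers the caller has already
   allocated, and that the histogram must start from zero before binning. */
static void exampleComputeHistogramCall(const unsigned int* d_vals,
                                        unsigned int* d_histo,
                                        unsigned int numBins,
                                        unsigned int numElems)
{
  // clear the bins, then hand off to the launcher above
  checkCudaErrors(hipMemset(d_histo, 0, numBins * sizeof(unsigned int)));
  computeHistogram(d_vals, d_histo, numBins, numElems);
}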
|
student.cu
|
/* Udacity HW5
Histogramming for Speed
The goal of this assignment is to compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
const int N_THREADS = 1024;
__global__
void naiveHisto(const unsigned int* const vals, //INPUT
unsigned int* const histo, //OUTPUT
int numVals)
{
int tid = threadIdx.x;
int global_id = tid + blockDim.x*blockIdx.x;
if (global_id >= numVals) return;
atomicAdd(&(histo[vals[global_id]]), 1);
}
__global__
void perBlockHisto(const unsigned int* const vals, //INPUT
unsigned int* const histo, //OUTPUT
int numVals,int numBins) {
extern __shared__ unsigned int sharedHisto[]; // dynamically sized: one counter per bin, same size as the global histogram
//each block zeroes its own shared-memory copy; striding by blockDim.x lets the block's threads cover all numBins bins
for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
sharedHisto[i] = 0;
}
__syncthreads();
int globalid = threadIdx.x + blockIdx.x*blockDim.x;
if (globalid < numVals) { // guard the tail block so out-of-range threads never index vals[]
  atomicAdd(&sharedHisto[vals[globalid]], 1);
}
__syncthreads();
for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
atomicAdd(&histo[i], sharedHisto[i]);
}
}
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems)
{
//TODO Launch the yourHisto kernel
int blocks = (numElems + N_THREADS - 1) / N_THREADS; // round up; ceil() of an already-truncated integer division does nothing
//naiveHisto <<< blocks, N_THREADS >>> (d_vals, d_histo, numElems);
//more than 7x speedup over naiveHisto
perBlockHisto << <blocks, N_THREADS, sizeof(unsigned int)*numBins >> > (d_vals, d_histo, numElems, numBins);
//if you want to use/launch more than one kernel,
//feel free
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
7165b0956033a64d60996fe49b593fdfadc6a8e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
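// note: there is no bounds check here, so initialize_array() below is only safe when
// size is a multiple of the block size (FORMA_CEIL rounds the grid up otherwise)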
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, double * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (N-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (M-3)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 2;
if(__iter_2__ <= (L-3)){
double __temp_0__;
__temp_0__ = (0.083000f * input[__iter_0__+(2)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_1__;
__temp_1__ = (0.083000f * input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
double __temp_3__;
__temp_3__ = (0.083000f * input[__iter_0__+(-1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
double __temp_5__;
__temp_5__ = (0.083000f * input[__iter_0__+(-2)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
double __temp_7__;
__temp_7__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(2)+(M-0)*(__iter_2__))]);
double __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
double __temp_9__;
__temp_9__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))]);
double __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
double __temp_11__;
__temp_11__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(-1)+(M-0)*(__iter_2__))]);
double __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
double __temp_13__;
__temp_13__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(-2)+(M-0)*(__iter_2__))]);
double __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
double __temp_15__;
__temp_15__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(2)))]);
double __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
double __temp_17__;
__temp_17__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))]);
double __temp_18__;
__temp_18__ = (__temp_16__ + __temp_17__);
double __temp_19__;
__temp_19__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-1)))]);
double __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
double __temp_21__;
__temp_21__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-2)))]);
double __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
double __temp_23__;
__temp_23__ = (0.996000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_24__;
__temp_24__ = (__temp_22__ - __temp_23__);
__var_4__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] = __temp_24__;
}
}
}
}
/* Host Code Begin */
extern "C" void j3d13pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
hipMalloc(&__var_3__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
hipMalloc(&__var_4__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-3) - 2 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 16;
int __block_1___kernel___forma_kernel__0__ = 4;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
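// four applications of the 13-point stencil, ping-ponging through the temporaries:
// input -> __var_4__ -> __var_3__ -> __var_2__ -> __var_1__ (which is copied back to __var_0__ below)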
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __var_4__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, L, M, N, __var_3__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, L, M, N, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
hipFree(__var_3__);
hipFree(__var_4__);
}
/*Host Free End*/
|
7165b0956033a64d60996fe49b593fdfadc6a8e1.cu
|
#include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, double * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (N-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (M-3)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 2;
if(__iter_2__ <= (L-3)){
double __temp_0__;
__temp_0__ = (0.083000f * input[__iter_0__+(2)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_1__;
__temp_1__ = (0.083000f * input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
double __temp_3__;
__temp_3__ = (0.083000f * input[__iter_0__+(-1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
double __temp_5__;
__temp_5__ = (0.083000f * input[__iter_0__+(-2)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
double __temp_7__;
__temp_7__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(2)+(M-0)*(__iter_2__))]);
double __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
double __temp_9__;
__temp_9__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))]);
double __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
double __temp_11__;
__temp_11__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(-1)+(M-0)*(__iter_2__))]);
double __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
double __temp_13__;
__temp_13__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(-2)+(M-0)*(__iter_2__))]);
double __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
double __temp_15__;
__temp_15__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(2)))]);
double __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
double __temp_17__;
__temp_17__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))]);
double __temp_18__;
__temp_18__ = (__temp_16__ + __temp_17__);
double __temp_19__;
__temp_19__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-1)))]);
double __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
double __temp_21__;
__temp_21__ = (0.083000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-2)))]);
double __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
double __temp_23__;
__temp_23__ = (0.996000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_24__;
__temp_24__ = (__temp_22__ - __temp_23__);
__var_4__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] = __temp_24__;
}
}
}
}
/* Host Code Begin */
extern "C" void j3d13pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
cudaMalloc(&__var_3__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
cudaMalloc(&__var_4__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-3) - 2 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 16;
int __block_1___kernel___forma_kernel__0__ = 4;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __var_4__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, L, M, N, __var_3__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, L, M, N, __var_2__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
cudaFree(__var_3__);
cudaFree(__var_4__);
}
/*Host Free End*/
|
a310cd3d762361bd31fcdcf98e4991111d0f84fd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "mymulmat.h"
#include <cstdio>
#include <hip/hip_runtime.h>
void printError(hipError_t e, int l) {
if (e != hipSuccess) {
printf("error: %s (code %d), line(%d)\n", hipGetErrorString(e), e, l);
exit(EXIT_FAILURE);
}
}
#define CheckError(f) printError((f), __LINE__)
#define BX 32
#define BY 8
#define STEP 32
#define UNX 8
#define UNY 16
#define G_A_INDEX(b_x, b_y, t_x, t_y) ((n) * ((BY) * (b_y) + (t_y)) + ((STEP) * (b_x) + (t_x)))
#define G_B_INDEX(b_x, b_y, t_x, t_y) ((n) * ((STEP) * (b_y) + (t_y)) + ((BX) * (b_x) + (t_x)))
#define G_C_INDEX(b_x, b_y, t_x, t_y) ((n) * ((BY) * (b_y) + (t_y)) + ((BX) * (b_x) + (t_x)))
#define S_INDEX(t_x, t_y, w_x) ((t_y) * (w_x) + (t_x))
__global__
void kernel(int n, int m, int k, melem_t *A, melem_t *B, melem_t *C) {
__shared__ melem_t A_[STEP * (BY * UNY)];
__shared__ melem_t B_[(BX * UNX) * STEP];
melem_t C_[UNX * UNY];
for(int i = 0; i < UNX; i++){
for(int j = 0; j < UNY; j++){
C_[S_INDEX(i, j, UNX)] = 0;
}
}
for(int s = 0; s < n / STEP; s++){
for(int t = 0; t < STEP/BX; t++){
for(int i = 0; i < UNY; i++){
A_[S_INDEX(threadIdx.x + BX * t, threadIdx.y + BY * i, STEP)] = A[G_A_INDEX(s, blockIdx.y * UNY + i, threadIdx.x + BX * t, threadIdx.y)];
}
}
for(int t = 0; t < STEP/BY; t++){
for(int i = 0; i < UNX; i++){
B_[S_INDEX(threadIdx.x + BX * i, threadIdx.y + BY * t, BX * UNX)] = B[G_B_INDEX(blockIdx.x * UNX + i, s, threadIdx.x, threadIdx.y + BY * t)];
}
}
__syncthreads();
for(int k = 0; k < STEP; k++){
for(int i = 0; i < UNX; i++){
for(int j = 0; j < UNY; j++){
C_[S_INDEX(i, j, UNX)] += A_[S_INDEX(k, threadIdx.y + BY * j, STEP)] * B_[S_INDEX(threadIdx.x + BX * i, k, BX * UNX)];
}
}
}
__syncthreads();
}
for(int i = 0; i < UNX; i++){
for(int j = 0; j < UNY; j++){
C[G_C_INDEX(blockIdx.x * UNX + i, blockIdx.y * UNY + j, threadIdx.x, threadIdx.y)] = C_[S_INDEX(i, j, UNX)];
}
}
}
uint64_t cudaGemm(int n, int m, int k, melem_t *A, melem_t *B, melem_t *C) {
// device initialize
int device = 0;
hipSetDevice(device);
// device malloc
melem_t *devA, *devB, *devC;
size_t sizeA = size_t(n)*k*sizeof(melem_t);
size_t sizeB = size_t(k)*m*sizeof(melem_t);
size_t sizeC = size_t(n)*m*sizeof(melem_t);
CheckError(hipMalloc((void**) &devA, sizeA));
CheckError(hipMalloc((void**) &devB, sizeB));
CheckError(hipMalloc((void**) &devC, sizeC));
// data load
CheckError(hipMemcpy(devA, A, sizeA, hipMemcpyHostToDevice));
CheckError(hipMemcpy(devB, B, sizeB, hipMemcpyHostToDevice));
CheckError(hipMemcpy(devC, C, sizeC, hipMemcpyHostToDevice));
// gemm start
hipEvent_t start, stop;
CheckError(hipEventCreate(&start));
CheckError(hipEventCreate(&stop));
hipDeviceSynchronize();
dim3 grid(n / (BX * UNX), n / (BY * UNY));
dim3 block(BX, BY);
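// launch geometry: each block of BX x BY = 256 threads computes a (BX*UNX) x (BY*UNY) = 256 x 128
// tile of C (UNX x UNY = 128 outputs per thread), so n must be a multiple of 256 and of 128 here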
printf("Shared memory: %d\n", (STEP * BY * UNY + BX * UNX * STEP) * 4);
// time measuring
CheckError(hipEventRecord(start, NULL));
hipLaunchKernelGGL(( kernel) , dim3(grid), dim3(block), 0, 0, n, m, k, devA, devB, devC);
CheckError(hipEventRecord(stop, NULL));
// gemm end
CheckError(hipEventSynchronize(stop));
float msec = 0.0f;
CheckError(hipEventElapsedTime(&msec, start, stop));
// data store
CheckError(hipMemcpy(C, devC, sizeC, hipMemcpyDeviceToHost));
// device free
CheckError(hipFree(devA));
CheckError(hipFree(devB));
CheckError(hipFree(devC));
return (uint64_t)(msec * 1000.0f);
}
|
a310cd3d762361bd31fcdcf98e4991111d0f84fd.cu
|
#include "mymulmat.h"
#include <cstdio>
#include <cuda_runtime.h>
void printError(cudaError_t e, int l) {
if (e != cudaSuccess) {
printf("error: %s (code %d), line(%d)\n", cudaGetErrorString(e), e, l);
exit(EXIT_FAILURE);
}
}
#define CheckError(f) printError((f), __LINE__)
#define BX 32
#define BY 8
#define STEP 32
#define UNX 8
#define UNY 16
#define G_A_INDEX(b_x, b_y, t_x, t_y) ((n) * ((BY) * (b_y) + (t_y)) + ((STEP) * (b_x) + (t_x)))
#define G_B_INDEX(b_x, b_y, t_x, t_y) ((n) * ((STEP) * (b_y) + (t_y)) + ((BX) * (b_x) + (t_x)))
#define G_C_INDEX(b_x, b_y, t_x, t_y) ((n) * ((BY) * (b_y) + (t_y)) + ((BX) * (b_x) + (t_x)))
#define S_INDEX(t_x, t_y, w_x) ((t_y) * (w_x) + (t_x))
__global__
void kernel(int n, int m, int k, melem_t *A, melem_t *B, melem_t *C) {
__shared__ melem_t A_[STEP * (BY * UNY)];
__shared__ melem_t B_[(BX * UNX) * STEP];
melem_t C_[UNX * UNY];
for(int i = 0; i < UNX; i++){
for(int j = 0; j < UNY; j++){
C_[S_INDEX(i, j, UNX)] = 0;
}
}
for(int s = 0; s < n / STEP; s++){
for(int t = 0; t < STEP/BX; t++){
for(int i = 0; i < UNY; i++){
A_[S_INDEX(threadIdx.x + BX * t, threadIdx.y + BY * i, STEP)] = A[G_A_INDEX(s, blockIdx.y * UNY + i, threadIdx.x + BX * t, threadIdx.y)];
}
}
for(int t = 0; t < STEP/BY; t++){
for(int i = 0; i < UNX; i++){
B_[S_INDEX(threadIdx.x + BX * i, threadIdx.y + BY * t, BX * UNX)] = B[G_B_INDEX(blockIdx.x * UNX + i, s, threadIdx.x, threadIdx.y + BY * t)];
}
}
__syncthreads();
for(int k = 0; k < STEP; k++){
for(int i = 0; i < UNX; i++){
for(int j = 0; j < UNY; j++){
C_[S_INDEX(i, j, UNX)] += A_[S_INDEX(k, threadIdx.y + BY * j, STEP)] * B_[S_INDEX(threadIdx.x + BX * i, k, BX * UNX)];
}
}
}
__syncthreads();
}
for(int i = 0; i < UNX; i++){
for(int j = 0; j < UNY; j++){
C[G_C_INDEX(blockIdx.x * UNX + i, blockIdx.y * UNY + j, threadIdx.x, threadIdx.y)] = C_[S_INDEX(i, j, UNX)];
}
}
}
uint64_t cudaGemm(int n, int m, int k, melem_t *A, melem_t *B, melem_t *C) {
// device initialize
int device = 0;
cudaSetDevice(device);
// device malloc
melem_t *devA, *devB, *devC;
size_t sizeA = size_t(n)*k*sizeof(melem_t);
size_t sizeB = size_t(k)*m*sizeof(melem_t);
size_t sizeC = size_t(n)*m*sizeof(melem_t);
CheckError(cudaMalloc((void**) &devA, sizeA));
CheckError(cudaMalloc((void**) &devB, sizeB));
CheckError(cudaMalloc((void**) &devC, sizeC));
// data load
CheckError(cudaMemcpy(devA, A, sizeA, cudaMemcpyHostToDevice));
CheckError(cudaMemcpy(devB, B, sizeB, cudaMemcpyHostToDevice));
CheckError(cudaMemcpy(devC, C, sizeC, cudaMemcpyHostToDevice));
// gemm start
cudaEvent_t start, stop;
CheckError(cudaEventCreate(&start));
CheckError(cudaEventCreate(&stop));
cudaDeviceSynchronize();
dim3 grid(n / (BX * UNX), n / (BY * UNY));
dim3 block(BX, BY);
printf("Shared memory: %d\n", (STEP * BY * UNY + BX * UNX * STEP) * 4);
// time measuring
CheckError(cudaEventRecord(start, NULL));
kernel <<<grid, block>>> (n, m, k, devA, devB, devC);
CheckError(cudaEventRecord(stop, NULL));
// gemm end
CheckError(cudaEventSynchronize(stop));
float msec = 0.0f;
CheckError(cudaEventElapsedTime(&msec, start, stop));
// data store
CheckError(cudaMemcpy(C, devC, sizeC, cudaMemcpyDeviceToHost));
// device free
CheckError(cudaFree(devA));
CheckError(cudaFree(devB));
CheckError(cudaFree(devC));
return (uint64_t)(msec * 1000.0f);
}
|
f03aa116ef7948115792c37459068c1c8bcea6d1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <vector>
#include <hip/hip_runtime.h>
#include "NPP_staging.hpp"
texture<Ncv8u, 1, hipReadModeElementType> tex8u;
texture<Ncv32u, 1, hipReadModeElementType> tex32u;
texture<uint2, 1, hipReadModeElementType> tex64u;
//==============================================================================
//
// CUDA streams handling
//
//==============================================================================
static hipStream_t nppStream = 0;
hipStream_t nppStGetActiveCUDAstream(void)
{
return nppStream;
}
hipStream_t nppStSetActiveCUDAstream(hipStream_t cudaStream)
{
hipStream_t tmp = nppStream;
nppStream = cudaStream;
return tmp;
}
//==============================================================================
//
// BlockScan.cuh
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
template <class T>
inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
{
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
template <class T>
inline __device__ T warpScanExclusive(T idata, volatile T *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <class T, Ncv32u tiNumScanThreads>
inline __device__ T blockScanInclusive(T idata, volatile T *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
T warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
T val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
//==============================================================================
//
// IntegralImage.cu
//
//==============================================================================
const Ncv32u NUM_SCAN_THREADS = 256;
const Ncv32u LOG2_NUM_SCAN_THREADS = 8;
template<class T_in, class T_out>
struct _scanElemOp
{
template<bool tbDoSqr>
static inline __host__ __device__ T_out scanElemOp(T_in elem)
{
return scanElemOp( elem, Int2Type<(int)tbDoSqr>() );
}
private:
template <int v> struct Int2Type { enum { value = v }; };
static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>)
{
return (T_out)elem;
}
static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>)
{
return (T_out)(elem*elem);
}
};
template<class T>
inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs);
template<>
inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs);
}
template<>
inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return d_src[curElemOffs];
}
template<>
inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return d_src[curElemOffs];
}
/**
* \brief Segmented scan kernel
*
* Calculates per-row prefix scans of the input image.
* Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements
*
* \tparam T_in Type of input image elements
* \tparam T_out Type of output image elements
* \tparam T_op Defines an operation to be performed on the input image pixels
*
* \param d_src [IN] Source image pointer
* \param srcWidth [IN] Source image width
* \param srcStride [IN] Source image stride
* \param d_II [OUT] Output image pointer
* \param IIstride [IN] Output image stride
*
* \return None
*/
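/* For a single input row [2, 5, 1] (with tbDoSqr == false) the expected output row is the
 * exclusive prefix scan with the row total appended: [0, 2, 7, 8], i.e. srcWidth + 1
 * elements per row, which is what the integral-image code below relies on. */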
template <class T_in, class T_out, bool tbDoSqr>
__global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride,
T_out *d_II, Ncv32u IIstride)
{
//advance pointers to the current line
if (sizeof(T_in) != 1)
{
d_src += srcStride * blockIdx.x;
}
//for initial image 8bit source we use texref tex8u
d_II += IIstride * blockIdx.x;
Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS;
Ncv32u offsetX = 0;
__shared__ T_out shmem[NUM_SCAN_THREADS * 2];
__shared__ T_out carryElem;
carryElem = 0;
__syncthreads();
while (numBuckets--)
{
Ncv32u curElemOffs = offsetX + threadIdx.x;
T_out curScanElem;
T_in curElem;
T_out curElemMod;
if (curElemOffs < srcWidth)
{
//load elements
curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs);
}
curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem);
//inclusive scan
curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem);
if (curElemOffs <= srcWidth)
{
//make scan exclusive and write the bucket to the output buffer
d_II[curElemOffs] = carryElem + curScanElem - curElemMod;
offsetX += NUM_SCAN_THREADS;
}
//remember last element for subsequent buckets adjustment
__syncthreads();
if (threadIdx.x == NUM_SCAN_THREADS-1)
{
carryElem += curScanElem;
}
__syncthreads();
}
if (offsetX == srcWidth && !threadIdx.x)
{
d_II[offsetX] = carryElem;
}
}
template <bool tbDoSqr, class T_in, class T_out>
NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride,
T_out *d_dst, Ncv32u dstStride, NcvSize32u roi)
{
hipChannelFormatDesc cfdTex;
size_t alignmentOffset = 0;
if (sizeof(T_in) == 1)
{
cfdTex = hipCreateChannelDesc<Ncv8u>();
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
if (alignmentOffset > 0)
{
ncvAssertCUDAReturn(hipUnbindTexture(tex8u), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
}
}
hipLaunchKernelGGL((scanRows<T_in, T_out, tbDoSqr>),
    dim3(roi.height), dim3(NUM_SCAN_THREADS), 0, nppStGetActiveCUDAstream(),
    d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment)
{
Ncv32u alignMask = allocatorAlignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u dimBytes = dim * elemTypeSize;
Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask;
Ncv32u PaddedDim = pitch / elemTypeSize;
return PaddedDim;
}
template <class T_in, class T_out>
NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep,
T_out *d_dst, Ncv32u dstStep, NcvSize32u roi,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) &&
dstStep >= (roi.width + 1) * sizeof(T_out) &&
srcStep % sizeof(T_in) == 0 &&
dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(T_in);
dstStep /= sizeof(T_out);
Ncv32u WidthII = roi.width + 1;
Ncv32u HeightII = roi.height + 1;
Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32);
ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
ncvStat = scanRowsWrapperDevice
<false>
(d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u),
(Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = scanRowsWrapperDevice
<false>
(Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u),
(Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII));
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep,
Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roi.width &&
dstStep >= (roi.width + 1) * sizeof(Ncv64u) &&
dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv64u);
Ncv32u WidthII = roi.width + 1;
Ncv32u HeightII = roi.height + 1;
Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment());
Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment());
Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64;
Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? PaddedHeightII32 : PaddedHeightII64;
NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax);
ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64);
ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
ncvStat = scanRowsWrapperDevice
<true, Ncv8u, Ncv32u>
(d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u),
Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = scanRowsWrapperDevice
<false, Ncv32u, Ncv64u>
(Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u),
d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII));
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width,
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f),
(Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width,
NULL, (roiSize.width+1) * sizeof(Ncv64u),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep,
Ncv32u *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
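// Usage sketch (illustrative only): driving the 8u->32u integral image on the device.
// Assumes d_src is an 8u device image of size roi with pitch srcPitchBytes, and d_sum is a
// (roi.width+1) x (roi.height+1) 32u device image with pitch sumPitchBytes; all names here
// are placeholders and NCVStatus error checking is omitted.
//
// hipDeviceProp_t devProp;
// hipGetDeviceProperties(&devProp, 0);
// Ncv32u bufSize = 0;
// nppiStIntegralGetSize_8u32u(roi, &bufSize, devProp);
// Ncv8u *d_buf = NULL;
// hipMalloc((void **)&d_buf, bufSize);
// nppiStIntegral_8u32u_C1R(d_src, srcPitchBytes, d_sum, sumPitchBytes, roi, d_buf, bufSize, devProp);
// hipFree(d_buf);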
NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep,
Ncv32f *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep,
Ncv64u *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
Ncv32u *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) &&
dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv32u);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv32u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv32u top = h_dst[(i-1) * dstStep + j];
Ncv32u left = h_dst[i * dstStep + (j - 1)];
Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep,
Ncv32f *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) &&
srcStep % sizeof(Ncv32f) == 0 &&
dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(Ncv32f);
dstStep /= sizeof(Ncv32f);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv32f));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0.0f;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv32f top = h_dst[(i-1) * dstStep + j];
Ncv32f left = h_dst[i * dstStep + (j - 1)];
Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
Ncv64u *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) &&
dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv64u);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv64u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv64u top = h_dst[(i-1) * dstStep + j];
Ncv64u left = h_dst[i * dstStep + (j - 1)];
Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem*elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
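// A worked example of the recurrence used by the host reference implementations above
// (values chosen arbitrarily). For src = [1 2; 3 4] the integral table, with its extra
// zero row and column, is
//
// 0 0 0
// 0 1 3
// 0 4 10
//
// where each cell is elem + left + top - topleft, e.g. the bottom-right cell is
// 4 + 4 + 3 - 1 = 10. The squared variant accumulates elem*elem instead of elem.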
//==============================================================================
//
// Decimate.cu
//
//==============================================================================
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32;
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8;
template<class T, NcvBool tbCacheTexture>
__device__ T getElem_Decimate(Ncv32u x, T *d_src);
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src)
{
return tex1Dfetch(tex32u, x);
}
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src)
{
return d_src[x];
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src)
{
return d_src[x];
}
template <class T, NcvBool tbCacheTexture>
__global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep,
NcvSize32u dstRoi, Ncv32u scale)
{
int curX = blockIdx.x * blockDim.x + threadIdx.x;
int curY = blockIdx.y * blockDim.y + threadIdx.y;
if (curX >= dstRoi.width || curY >= dstRoi.height)
{
return;
}
d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src);
}
template <class T>
static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep,
T *d_dst, Ncv32u dstStep,
NcvSize32u srcRoi, Ncv32u scale,
NcvBool readThruTexture)
{
ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP);
srcStep /= sizeof(T);
dstStep /= sizeof(T);
NcvSize32u dstRoi;
dstRoi.width = srcRoi.width / scale;
dstRoi.height = srcRoi.height / scale;
dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X,
(dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
if (!readThruTexture)
{
hipLaunchKernelGGL(( decimate_C1R
<T, false>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcStep, d_dst, dstStep, dstRoi, scale);
}
else
{
hipChannelFormatDesc cfdTexSrc;
if (sizeof(T) == sizeof(Ncv32u))
{
cfdTexSrc = hipCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
}
else
{
cfdTexSrc = hipCreateChannelDesc<uint2>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
}
hipLaunchKernelGGL(( decimate_C1R
<T, true>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcStep, d_dst, dstStep, dstRoi, scale);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
template <class T>
static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep,
T *h_dst, Ncv32u dstStep,
NcvSize32u srcRoi, Ncv32u scale)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI);
ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) &&
srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(T);
dstStep /= sizeof(T);
NcvSize32u dstRoi;
dstRoi.width = srcRoi.width / scale;
dstRoi.height = srcRoi.height / scale;
for (Ncv32u i=0; i<dstRoi.height; i++)
{
for (Ncv32u j=0; j<dstRoi.width; j++)
{
h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale];
}
}
return NPPST_SUCCESS;
}
#define implementNppDecimate(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \
{ \
return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, \
srcRoi, scale, readThruTexture); \
}
#define implementNppDecimateHost(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale) \
{ \
return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, \
srcRoi, scale); \
}
implementNppDecimate(32, u)
implementNppDecimate(32, s)
implementNppDecimate(32, f)
implementNppDecimate(64, u)
implementNppDecimate(64, s)
implementNppDecimate(64, f)
implementNppDecimateHost(32, u)
implementNppDecimateHost(32, s)
implementNppDecimateHost(32, f)
implementNppDecimateHost(64, u)
implementNppDecimateHost(64, s)
implementNppDecimateHost(64, f)
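// Usage sketch (illustrative only): nearest-neighbour decimation of a 640x480 32u image by 2.
// d_in / d_out and their pitches are placeholder names; steps are given in bytes, and the
// output holds every second pixel of every second row (320x240).
//
// NcvSize32u srcRoi;
// srcRoi.width = 640;
// srcRoi.height = 480;
// nppiStDecimate_32u_C1R(d_in, 640 * sizeof(Ncv32u),
// d_out, 320 * sizeof(Ncv32u),
// srcRoi, 2, false);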
//==============================================================================
//
// RectStdDev.cu
//
//==============================================================================
const Ncv32u NUM_RECTSTDDEV_THREADS = 128;
template <NcvBool tbCacheTexture>
__device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum)
{
if (tbCacheTexture)
{
return tex1Dfetch(tex32u, x);
}
else
{
return d_sum[x];
}
}
template <NcvBool tbCacheTexture>
__device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum)
{
if (tbCacheTexture)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
else
{
return d_sqsum[x];
}
}
template <NcvBool tbCacheTexture>
__global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
Ncv64u *d_sqsum, Ncv32u sqsumStep,
Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea)
{
Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
if (x_offs >= roi.width)
{
return;
}
Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;
//OPT: try swapping order (could change cache hit/miss ratio)
Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;
Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;
Ncv32f mean = sum_val * invRectArea;
//////////////////////////////////////////////////////////////////////////
// sqsum_val_res = sqsum_val / rectArea
//////////////////////////////////////////////////////////////////////////
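// The 64-bit sum is split into a float-rounded head and its residual so that both parts
// can be scaled by invRectArea in FP32 and re-added; this loses less precision than a
// single 64-bit -> 32-bit float conversion followed by one multiply would.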
Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
sqsum_val_1 *= invRectArea;
sqsum_val_4 *= invRectArea;
Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;
//////////////////////////////////////////////////////////////////////////
// variance = sqsum_val_res - mean * mean
//////////////////////////////////////////////////////////////////////////
#if defined DISABLE_MAD_SELECTIVELY
Ncv32f variance = sqsum_val_res - __fmul_rn(mean, mean);
#else
Ncv32f variance = sqsum_val_res - mean * mean;
#endif
//////////////////////////////////////////////////////////////////////////
// stddev = sqrtf(variance)
//////////////////////////////////////////////////////////////////////////
//Ncv32f stddev = sqrtf(variance);
Ncv32f stddev = __fsqrt_rn(variance);
d_norm[blockIdx.y * normStep + x_offs] = stddev;
}
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
Ncv64u *d_sqsum, Ncv32u sqsumStep,
Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect,
Ncv32f scaleArea, NcvBool readThruTexture)
{
ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
sumStep % sizeof(Ncv32u) == 0 &&
sqsumStep % sizeof(Ncv64u) == 0 &&
normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
sumStep /= sizeof(Ncv32u);
sqsumStep /= sizeof(Ncv64u);
normStep /= sizeof(Ncv32f);
Ncv32f rectArea = rect.width * rect.height * scaleArea;
Ncv32f invRectArea = 1.0f / rectArea;
dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
dim3 block(NUM_RECTSTDDEV_THREADS);
if (!readThruTexture)
{
hipLaunchKernelGGL(( rectStdDev_32f_C1R
<false>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
}
else
{
hipChannelFormatDesc cfdTexSrc;
hipChannelFormatDesc cfdTexSqr;
cfdTexSrc = hipCreateChannelDesc<Ncv32u>();
cfdTexSqr = hipCreateChannelDesc<uint2>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
hipLaunchKernelGGL(( rectStdDev_32f_C1R
<true>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
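// Usage sketch (illustrative only): per-pixel standard deviation of a 20x20 window, computed
// from the integral (d_sum) and squared-integral (d_sqsum) images produced by the routines
// above. Pointer and pitch names are placeholders; the result is approximately
// sqrt(E[x^2] - E[x]^2) over each shifted window.
//
// NcvRect32u rect;
// rect.x = 0; rect.y = 0; rect.width = 20; rect.height = 20;
// nppiStRectStdDev_32f_C1R(d_sum, sumPitchBytes, d_sqsum, sqsumPitchBytes,
// d_norm, normPitchBytes, roi, rect, 1.0f, false);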
NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep,
Ncv64u *h_sqsum, Ncv32u sqsumStep,
Ncv32f *h_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect,
Ncv32f scaleArea)
{
ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
sumStep % sizeof(Ncv32u) == 0 &&
sqsumStep % sizeof(Ncv64u) == 0 &&
normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
sumStep /= sizeof(Ncv32u);
sqsumStep /= sizeof(Ncv64u);
normStep /= sizeof(Ncv32f);
Ncv32f rectArea = rect.width * rect.height * scaleArea;
Ncv32f invRectArea = 1.0f / rectArea;
for (Ncv32u i=0; i<roi.height; i++)
{
for (Ncv32u j=0; j<roi.width; j++)
{
Ncv32u sum_offset = i * sumStep + j;
Ncv32u sqsum_offset = i * sqsumStep + j;
Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x];
Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x];
Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width];
Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width];
Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl;
Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x];
Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x];
Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width];
Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width];
Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl);
Ncv64f mean = sum_val * invRectArea;
Ncv64f sqsum_val_2 = sqsum_val / rectArea;
Ncv64f variance = sqsum_val_2 - mean * mean;
h_norm[i * normStep + j] = (Ncv32f)sqrt(variance);
}
}
return NPPST_SUCCESS;
}
//==============================================================================
//
// Transpose.cu
//
//==============================================================================
const Ncv32u TRANSPOSE_TILE_DIM = 16;
const Ncv32u TRANSPOSE_BLOCK_ROWS = 16;
/**
* \brief Matrix transpose kernel
*
* Calculates transpose of the input image
* \see TRANSPOSE_TILE_DIM
*
* \tparam T Type of image elements
*
* \param d_src [IN] Source image pointer
* \param srcStride [IN] Source image stride
* \param d_dst [OUT] Output image pointer
* \param dstStride [IN] Output image stride
* \param srcRoi [IN] Source image ROI
*
* \return None
*/
template <class T>
__global__ void transpose(T *d_src, Ncv32u srcStride,
T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
__shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
Ncv32u blockIdx_x, blockIdx_y;
// do diagonal reordering
if (gridDim.x == gridDim.y)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else
{
Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;
Ncv32u index_gmem = xIndex + yIndex * srcStride;
if (xIndex < srcRoi.width)
{
for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if (yIndex + i < srcRoi.height)
{
tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride];
}
}
}
__syncthreads();
xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_gmem = xIndex + yIndex * dstStride;
if (xIndex < srcRoi.height)
{
for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if (yIndex + i < srcRoi.width)
{
d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
}
template <class T>
NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride,
T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
dstStride >= srcRoi.height * sizeof(T) &&
srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStride /= sizeof(T);
dstStride /= sizeof(T);
dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM,
(srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM);
dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);
hipLaunchKernelGGL(( transpose
<T>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcStride, d_dst, dstStride, srcRoi);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
template <class T>
static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride,
T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
dstStride >= srcRoi.height * sizeof(T) &&
srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStride /= sizeof(T);
dstStride /= sizeof(T);
for (Ncv32u i=0; i<srcRoi.height; i++)
{
for (Ncv32u j=0; j<srcRoi.width; j++)
{
h_dst[j*dstStride+i] = h_src[i*srcStride + j];
}
}
return NPPST_SUCCESS;
}
#define implementNppTranspose(bit, typ) \
NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \
{ \
return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, srcRoi); \
}
#define implementNppTransposeHost(bit, typ) \
NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi) \
{ \
return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, srcRoi); \
}
implementNppTranspose(32,u)
implementNppTranspose(32,s)
implementNppTranspose(32,f)
implementNppTranspose(64,u)
implementNppTranspose(64,s)
implementNppTranspose(64,f)
implementNppTransposeHost(32,u)
implementNppTransposeHost(32,s)
implementNppTransposeHost(32,f)
implementNppTransposeHost(64,u)
implementNppTransposeHost(64,s)
implementNppTransposeHost(64,f)
NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep,
void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}
NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep,
void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}
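// Usage sketch (illustrative only): transposing a W x H 32f matrix into an H x W destination.
// d_src, d_dst, W and H are placeholders; both steps are in bytes and the destination must
// provide at least H elements per row and W rows.
//
// NcvSize32u roi;
// roi.width = W;
// roi.height = H;
// nppiStTranspose_32f_C1R(d_src, W * sizeof(Ncv32f), d_dst, H * sizeof(Ncv32f), roi);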
//==============================================================================
//
// Compact.cu
//
//==============================================================================
const Ncv32u NUM_REMOVE_THREADS = 256;
template <bool bRemove, bool bWritePartial>
__global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_offsets, Ncv32u *d_blockSums,
Ncv32u elemRemove)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn > srcLen + blockDim.x)
{
return;
}
__shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2];
Ncv32u scanElem = 0;
if (elemAddrIn < srcLen)
{
if (bRemove)
{
scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0;
}
else
{
scanElem = d_src[elemAddrIn];
}
}
Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem);
__syncthreads();
if (elemAddrIn < srcLen)
{
if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial)
{
d_blockSums[blockId] = localScanInc;
}
if (bRemove)
{
d_offsets[elemAddrIn] = localScanInc - scanElem;
}
else
{
d_src[elemAddrIn] = localScanInc - scanElem;
}
}
}
__global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn >= srcLen)
{
return;
}
__shared__ Ncv32u valOffs;
valOffs = d_blockSums[blockId];
__syncthreads();
d_offsets[elemAddrIn] += valOffs;
}
__global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_offsets, Ncv32u *d_dst,
Ncv32u elemRemove, Ncv32u *dstLenValue)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn >= srcLen)
{
return;
}
Ncv32u elem = d_src[elemAddrIn];
Ncv32u elemAddrOut = d_offsets[elemAddrIn];
if (elem != elemRemove)
{
d_dst[elemAddrOut] = elem;
}
if (elemAddrIn == srcLen-1)
{
if (elem != elemRemove)
{
*dstLenValue = elemAddrOut + 1;
}
else
{
*dstLenValue = elemAddrOut;
}
}
}
NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_dst, Ncv32u *dstLenPinned,
Ncv32u elemRemove,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
if (dstLenPinned != NULL)
{
*dstLenPinned = 0;
}
return NPPST_SUCCESS;
}
std::vector<Ncv32u> partSumNums;
std::vector<Ncv32u> partSumOffsets;
Ncv32u partSumLastNum = srcLen;
Ncv32u partSumLastOffs = 0;
do
{
partSumNums.push_back(partSumLastNum);
partSumOffsets.push_back(partSumLastOffs);
Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u),
gpuAllocator.alignment()) / sizeof(Ncv32u);
partSumLastOffs += curPartSumAlignedLength;
partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS;
}
while (partSumLastNum>1);
partSumNums.push_back(partSumLastNum);
partSumOffsets.push_back(partSumLastOffs);
NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1);
ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1);
ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
dim3 block(NUM_REMOVE_THREADS);
//calculate zero-level partial sums for indices calculation
if (partSumNums.size() > 2)
{
dim3 grid(partSumNums[1]);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( removePass1Scan
<true, true>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcLen,
d_hierSums.ptr(),
d_hierSums.ptr() + partSumOffsets[1],
elemRemove);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
//calculate hierarchical partial sums
for (Ncv32u i=1; i<partSumNums.size()-1; i++)
{
dim3 grid_partial(partSumNums[i+1]);
if (grid_partial.x > 65535)
{
grid_partial.y = (grid_partial.x + 65534) / 65535;
grid_partial.x = 65535;
}
if (grid_partial.x != 1)
{
hipLaunchKernelGGL(( removePass1Scan
<false, true>)
, dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(),
d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
d_hierSums.ptr() + partSumOffsets[i+1],
0);
}
else
{
hipLaunchKernelGGL(( removePass1Scan
<false, false>)
, dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(),
d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
NULL,
0);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
//adjust hierarchical partial sums
for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--)
{
dim3 grid_local(partSumNums[i+1]);
if (grid_local.x > 65535)
{
grid_local.y = (grid_local.x + 65534) / 65535;
grid_local.x = 65535;
}
hipLaunchKernelGGL(( removePass2Adjust)
, dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(),
d_hierSums.ptr() + partSumOffsets[i], partSumNums[i],
d_hierSums.ptr() + partSumOffsets[i+1]);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
}
else
{
dim3 grid_local(partSumNums[1]);
hipLaunchKernelGGL(( removePass1Scan
<true, false>)
, dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcLen,
d_hierSums.ptr(),
NULL, elemRemove);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
//compact source vector using indices
dim3 grid(partSumNums[1]);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( removePass3Compact)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcLen, d_hierSums.ptr(), d_dst,
elemRemove, d_numDstElements.ptr());
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
//get number of dst elements
if (dstLenPinned != NULL)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u),
hipMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
}
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
*pBufsize = 0;
return NPPST_SUCCESS;
}
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE,
gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}
NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_dst, Ncv32u *p_dstLen,
Ncv32u elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove,
gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
Ncv32s *d_dst, Ncv32u *p_dstLen,
Ncv32s elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
*(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
}
#if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__ > 4
typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a;
#else
typedef Ncv32u Ncv32u_a;
#endif
NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
Ncv32f *d_dst, Ncv32u *p_dstLen,
Ncv32f elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
*(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp);
}
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
if (dstLen != NULL)
{
*dstLen = 0;
}
return NPPST_SUCCESS;
}
Ncv32u dstIndex = 0;
for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++)
{
if (h_src[srcIndex] != elemRemove)
{
h_dst[dstIndex++] = h_src[srcIndex];
}
}
if (dstLen != NULL)
{
*dstLen = dstIndex;
}
return NPPST_SUCCESS;
}
NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
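// Usage sketch (illustrative only): removing a marker value from a device vector. The buffer
// size is queried first, exactly as in the GetSize helpers above; d_src, d_dst, d_buf, srcLen
// and devProp are placeholders, and h_dstLen should preferably be page-locked host memory
// since the result length is copied back asynchronously before the stream is synchronized.
//
// Ncv32u bufSize = 0;
// nppsStCompactGetSize_32u(srcLen, &bufSize, devProp);
// Ncv8u *d_buf = NULL;
// hipMalloc((void **)&d_buf, bufSize);
// Ncv32u h_dstLen = 0;
// nppsStCompact_32u(d_src, srcLen, d_dst, &h_dstLen, 0xFFFFFFFF, d_buf, bufSize, devProp);
// hipFree(d_buf);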
//==============================================================================
//
// Filter.cu
//
//==============================================================================
texture <float, 1, hipReadModeElementType> texSrc;
texture <float, 1, hipReadModeElementType> texKernel;
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
int i,
int w)
{
if (i < 0) i = 1 - i;
if (i >= w) i = w + w - i - 1;
return tex1Dfetch (texSrc, rowOffset + i);
}
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
const int rowStep,
int j,
int h)
{
if (j < 0) j = 1 - j;
if (j >= h) j = h + h - j - 1;
return tex1Dfetch (texSrc, offset + j * rowStep);
}
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u roi,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
// position within ROI
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= roi.width || iy >= roi.height)
{
return;
}
const int p = nKernelSize - nAnchor - 1;
const int j = roi.y + iy;
const int rowOffset = j * srcStep + roi.x;
float sum = 0.0f;
for (int m = 0; m < nKernelSize; ++m)
{
sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width)
* tex1Dfetch (texKernel, m);
}
pDst[iy * dstStep + ix] = sum * multiplier;
}
__global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u roi,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= roi.width || iy >= roi.height)
{
return;
}
const int p = nKernelSize - nAnchor - 1;
const int i = roi.x + ix;
const int offset = i + roi.y * srcStep;
float sum = 0.0f;
for (int m = 0; m < nKernelSize; ++m)
{
sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height)
* tex1Dfetch (texKernel, m);
}
pDst[ix + iy * dstStep] = sum * multiplier;
}
NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u nDstStep,
NcvRect32u oROI,
NppStBorderType borderType,
const Ncv32f *pKernel,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
ncvAssertReturn (pSrc != NULL &&
pDst != NULL &&
pKernel != NULL, NCV_NULL_PTR);
ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
dstSize.width * sizeof (Ncv32f) <= nDstStep &&
oROI.width * sizeof (Ncv32f) <= nSrcStep &&
oROI.width * sizeof (Ncv32f) <= nDstStep &&
nSrcStep % sizeof (Ncv32f) == 0 &&
nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
// adjust ROI size to be within source image
if (oROI.x + oROI.width > srcSize.width)
{
oROI.width = srcSize.width - oROI.x;
}
if (oROI.y + oROI.height > srcSize.height)
{
oROI.height = srcSize.height - oROI.y;
}
hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> ();
texSrc.normalized = false;
texKernel.normalized = false;
hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));
dim3 ctaSize (32, 6);
dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
(oROI.height + ctaSize.y - 1) / ctaSize.y);
switch (borderType)
{
case nppStBorderNone:
return NPPST_ERROR;
case nppStBorderClamp:
return NPPST_ERROR;
case nppStBorderWrap:
return NPPST_ERROR;
case nppStBorderMirror:
hipLaunchKernelGGL(( FilterRowBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
break;
default:
return NPPST_ERROR;
}
return NPPST_SUCCESS;
}
NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u nDstStep,
NcvRect32u oROI,
NppStBorderType borderType,
const Ncv32f *pKernel,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
ncvAssertReturn (pSrc != NULL &&
pDst != NULL &&
pKernel != NULL, NCV_NULL_PTR);
ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
dstSize.width * sizeof (Ncv32f) <= nDstStep &&
oROI.width * sizeof (Ncv32f) <= nSrcStep &&
oROI.width * sizeof (Ncv32f) <= nDstStep &&
nSrcStep % sizeof (Ncv32f) == 0 &&
nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
// adjust ROI size to be within source image
if (oROI.x + oROI.width > srcSize.width)
{
oROI.width = srcSize.width - oROI.x;
}
if (oROI.y + oROI.height > srcSize.height)
{
oROI.height = srcSize.height - oROI.y;
}
hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> ();
texSrc.normalized = false;
texKernel.normalized = false;
hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));
dim3 ctaSize (32, 6);
dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
(oROI.height + ctaSize.y - 1) / ctaSize.y);
switch (borderType)
{
case nppStBorderClamp:
return NPPST_ERROR;
case nppStBorderWrap:
return NPPST_ERROR;
case nppStBorderMirror:
hipLaunchKernelGGL(( FilterColumnBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
break;
default:
return NPPST_ERROR;
}
return NPPST_SUCCESS;
}
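// Usage sketch (illustrative only): a separable filter applied as a row pass followed by a
// column pass, both with mirrored borders (the only border mode implemented above). d_src,
// d_tmp, d_dst, d_kernel, pitch, roi and kernelSize are placeholders; d_kernel must live in
// device memory because it is read through a texture, and the anchor choice is arbitrary.
//
// nppiStFilterRowBorder_32f_C1R(d_src, srcSize, pitch, d_tmp, srcSize, pitch, roi,
// nppStBorderMirror, d_kernel, kernelSize, kernelSize / 2, 1.0f);
// nppiStFilterColumnBorder_32f_C1R(d_tmp, srcSize, pitch, d_dst, srcSize, pitch, roi,
// nppStBorderMirror, d_kernel, kernelSize, kernelSize / 2, 1.0f);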
//==============================================================================
//
// FrameInterpolate.cu
//
//==============================================================================
inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom)
{
return (num + denom - 1)/denom;
}
texture<float, 2, hipReadModeElementType> tex_src1;
texture<float, 2, hipReadModeElementType> tex_src0;
__global__ void BlendFramesKernel(const float *u, const float *v, // forward flow
const float *ur, const float *vr, // backward flow
const float *o0, const float *o1, // coverage masks
int w, int h, int s,
float theta, float *out)
{
const int ix = threadIdx.x + blockDim.x * blockIdx.x;
const int iy = threadIdx.y + blockDim.y * blockIdx.y;
const int pos = ix + s * iy;
if (ix >= w || iy >= h) return;
float _u = u[pos];
float _v = v[pos];
float _ur = ur[pos];
float _vr = vr[pos];
float x = (float)ix + 0.5f;
float y = (float)iy + 0.5f;
bool b0 = o0[pos] > 1e-4f;
bool b1 = o1[pos] > 1e-4f;
if (b0 && b1)
{
// pixel is visible on both frames
out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) +
tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta;
}
else if (b0)
{
// visible on the first frame only
out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta);
}
else
{
// visible on the second frame only
out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta));
}
}
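// For a pixel visible in both frames the kernel above evaluates
// out(x) = (1 - theta) * I0(x - theta * u(x)) + theta * I1(x + (1 - theta) * u(x)),
// sampling both frames with bilinear texture fetches; when the coverage masks mark the pixel
// as occluded in one frame, it falls back to warping from the visible frame only (using the
// backward flow ur/vr for pixels visible only in the second frame).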
NCVStatus BlendFrames(const Ncv32f *src0,
const Ncv32f *src1,
const Ncv32f *ufi,
const Ncv32f *vfi,
const Ncv32f *ubi,
const Ncv32f *vbi,
const Ncv32f *o1,
const Ncv32f *o2,
Ncv32u width,
Ncv32u height,
Ncv32u stride,
Ncv32f theta,
Ncv32f *out)
{
tex_src1.addressMode[0] = hipAddressModeClamp;
tex_src1.addressMode[1] = hipAddressModeClamp;
tex_src1.filterMode = hipFilterModeLinear;
tex_src1.normalized = false;
tex_src0.addressMode[0] = hipAddressModeClamp;
tex_src0.addressMode[1] = hipAddressModeClamp;
tex_src0.filterMode = hipFilterModeLinear;
tex_src0.normalized = false;
hipChannelFormatDesc desc = hipCreateChannelDesc <float> ();
const Ncv32u pitch = stride * sizeof (float);
ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
dim3 threads (32, 4);
dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));
hipLaunchKernelGGL(( BlendFramesKernel), dim3(blocks), dim3(threads), 0, nppStGetActiveCUDAstream (),
ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
Ncv32u nStep,
Ncv32u *hpSize)
{
NCVStatus status = NPPST_ERROR;
status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize);
return status;
}
NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
// check state validity
ncvAssertReturn (pState->pSrcFrame0 != 0 &&
pState->pSrcFrame1 != 0 &&
pState->pFU != 0 &&
pState->pFV != 0 &&
pState->pBU != 0 &&
pState->pBV != 0 &&
pState->pNewFrame != 0 &&
pState->ppBuffers[0] != 0 &&
pState->ppBuffers[1] != 0 &&
pState->ppBuffers[2] != 0 &&
pState->ppBuffers[3] != 0 &&
pState->ppBuffers[4] != 0 &&
pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (pState->size.width > 0 &&
pState->size.height > 0, NPPST_ERROR);
ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
pState->nStep > 0 &&
pState->nStep % sizeof (Ncv32f) == 0,
NPPST_INVALID_STEP);
// change notation
Ncv32f *cov0 = pState->ppBuffers[0];
Ncv32f *cov1 = pState->ppBuffers[1];
Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
Ncv32f *bwdV = pState->ppBuffers[5]; // backward v
// warp flow
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU,
pState->size,
pState->nStep,
pState->pFU,
pState->pFV,
pState->nStep,
cov0,
pState->pos,
fwdU) );
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV,
pState->size,
pState->nStep,
pState->pFU,
pState->pFV,
pState->nStep,
cov0,
pState->pos,
fwdV) );
// warp backward flow
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU,
pState->size,
pState->nStep,
pState->pBU,
pState->pBV,
pState->nStep,
cov1,
1.0f - pState->pos,
bwdU) );
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV,
pState->size,
pState->nStep,
pState->pBU,
pState->pBV,
pState->nStep,
cov1,
1.0f - pState->pos,
bwdV) );
// interpolate frame
ncvAssertReturnNcvStat (
BlendFrames (pState->pSrcFrame0,
pState->pSrcFrame1,
fwdU,
fwdV,
bwdU,
bwdV,
cov0,
cov1,
pState->size.width,
pState->size.height,
pState->nStep / sizeof (Ncv32f),
pState->pos,
pState->pNewFrame) );
return NPPST_SUCCESS;
}
//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================
#if __CUDA_ARCH__ < 200
// FP32 atomic add
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
{
float old = *addr, assumed;
do {
assumed = old;
old = int_as_float(__iAtomicCAS((int*)addr,
float_as_int(assumed),
float_as_int(val+assumed)));
} while( assumed!=old );
return old;
}
#else
#define _atomicAdd atomicAdd
#endif
__global__ void ForwardWarpKernel_PSF2x2(const float *u,
const float *v,
const float *src,
const int w,
const int h,
const int flow_stride,
const int image_stride,
const float time_scale,
float *normalization_factor,
float *dst)
{
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= h || j >= w) return;
int flow_row_offset = i * flow_stride;
int image_row_offset = i * image_stride;
//bottom left corner of a target pixel
float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;
// pixel containing bottom left corner
float px;
float py;
float dx = modff (cx, &px);
float dy = modff (cy, &py);
// target pixel integer coords
int tx;
int ty;
tx = (int) px;
ty = (int) py;
float value = src[image_row_offset + j];
float weight;
// fill pixel containing bottom right corner
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = dx * dy;
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
// fill pixel containing bottom left corner
tx -= 1;
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = (1.0f - dx) * dy;
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
// fill pixel containing upper left corner
ty -= 1;
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = (1.0f - dx) * (1.0f - dy);
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
// fill pixel containing upper right corner
tx += 1;
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = dx * (1.0f - dy);
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
}
__global__ void ForwardWarpKernel_PSF1x1(const float *u,
const float *v,
const float *src,
const int w,
const int h,
const int flow_stride,
const int image_stride,
const float time_scale,
float *dst)
{
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= h || j >= w) return;
int flow_row_offset = i * flow_stride;
int image_row_offset = i * image_stride;
float u_ = u[flow_row_offset + j];
float v_ = v[flow_row_offset + j];
//bottom left corner of target pixel
float cx = u_ * time_scale + (float)j + 1.0f;
float cy = v_ * time_scale + (float)i + 1.0f;
// pixel containing bottom left corner
int tx = __float2int_rn (cx);
int ty = __float2int_rn (cy);
float value = src[image_row_offset + j];
// fill pixel
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
_atomicAdd (dst + ty * image_stride + tx, value);
}
}
__global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= h || j >= w) return;
const int pos = i * s + j;
float scale = normalization_factor[pos];
float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale);
image[pos] *= invScale;
}
__global__ void MemsetKernel(const float value, int w, int h, float *image)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= h || j >= w) return;
const int pos = i * w + j;
image[pos] = value;
}
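// Note: MemsetKernel addresses the buffer densely (i * w + j) while the warp and normalize
// kernels index it with the image stride, so the normalization buffer appears to assume a
// tightly packed layout (stride equal to width); with a padded stride part of it would be
// left uncleared.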
NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize)
{
ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep,
NPPST_INVALID_STEP);
*hpSize = nSrcStep * srcSize.height;
return NPPST_SUCCESS;
}
// does not require normalization
NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
const Ncv32f *pU,
const Ncv32f *pV,
Ncv32u nVFStep,
Ncv32f timeScale,
Ncv32f *pDst)
{
ncvAssertReturn (pSrc != NULL &&
pU != NULL &&
pV != NULL &&
pDst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
srcSize.width * sizeof (Ncv32f) <= nVFStep,
NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u vfStep = nVFStep / sizeof (Ncv32f);
dim3 ctaSize (32, 6);
dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
hipLaunchKernelGGL(( ForwardWarpKernel_PSF1x1) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
const Ncv32f *pU,
const Ncv32f *pV,
Ncv32u nVFStep,
Ncv32f *pBuffer,
Ncv32f timeScale,
Ncv32f *pDst)
{
ncvAssertReturn (pSrc != NULL &&
pU != NULL &&
pV != NULL &&
pDst != NULL &&
pBuffer != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u vfStep = nVFStep / sizeof(Ncv32f);
dim3 ctaSize(32, 6);
dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
hipLaunchKernelGGL(( MemsetKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
0, srcSize.width, srcSize.height, pBuffer);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
hipLaunchKernelGGL(( ForwardWarpKernel_PSF2x2) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
hipLaunchKernelGGL(( NormalizeKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
pBuffer, srcSize.width, srcSize.height, srcStep, pDst);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
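// Usage sketch (illustrative only): forward-warping a 32f image by its flow field with the
// normalized 2x2 point-spread function. d_src, d_u, d_v, d_dst and pitchBytes are
// placeholders; the scratch buffer is sized by nppiStVectorWarpGetBufferSize and holds the
// per-pixel normalization weights.
//
// Ncv32u bufSize = 0;
// nppiStVectorWarpGetBufferSize(srcSize, pitchBytes, &bufSize);
// Ncv32f *d_buf = NULL;
// hipMalloc((void **)&d_buf, bufSize);
// nppiStVectorWarp_PSF2x2_32f_C1(d_src, srcSize, pitchBytes, d_u, d_v, pitchBytes,
// d_buf, 1.0f, d_dst);
// hipFree(d_buf);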
//==============================================================================
//
// Resize.cu
//
//==============================================================================
texture <float, 2, hipReadModeElementType> texSrc2D;
__forceinline__
__device__ float processLine(int spos,
float xmin,
float xmax,
int ixmin,
int ixmax,
float fxmin,
float cxmax)
{
// first element
float wsum = 1.0f - xmin + fxmin;
float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin);
spos++;
for (int ix = ixmin + 1; ix < ixmax; ++ix)
{
sum += tex1Dfetch(texSrc, spos);
spos++;
wsum += 1.0f;
}
sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax);
wsum += cxmax - xmax;
return sum / wsum;
}
__global__ void resizeSuperSample_32f(NcvSize32u srcSize,
Ncv32u srcStep,
NcvRect32u srcROI,
Ncv32f *dst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u dstROI,
Ncv32f scaleX,
Ncv32f scaleY)
{
// position within dst ROI
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= dstROI.width || iy >= dstROI.height)
{
return;
}
float rw = (float) srcROI.width;
float rh = (float) srcROI.height;
// source position
float x = scaleX * (float) ix;
float y = scaleY * (float) iy;
// x sampling range
float xBegin = fmax (x - scaleX, 0.0f);
float xEnd = fmin (x + scaleX, rw - 1.0f);
// y sampling range
float yBegin = fmax (y - scaleY, 0.0f);
float yEnd = fmin (y + scaleY, rh - 1.0f);
// x range of source samples
float floorXBegin = floorf (xBegin);
float ceilXEnd = ceilf (xEnd);
int iXBegin = srcROI.x + (int) floorXBegin;
int iXEnd = srcROI.x + (int) ceilXEnd;
// y range of source samples
float floorYBegin = floorf (yBegin);
float ceilYEnd = ceilf (yEnd);
int iYBegin = srcROI.y + (int) floorYBegin;
int iYEnd = srcROI.y + (int) ceilYEnd;
// first row
int pos = iYBegin * srcStep + iXBegin;
float wsum = 1.0f - yBegin + floorYBegin;
float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
ceilXEnd) * (1.0f - yBegin + floorYBegin);
pos += srcStep;
for (int iy = iYBegin + 1; iy < iYEnd; ++iy)
{
sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
ceilXEnd);
pos += srcStep;
wsum += 1.0f;
}
sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
ceilXEnd) * (ceilYEnd - yEnd);
wsum += ceilYEnd - yEnd;
sum /= wsum;
dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum;
}
// bicubic interpolation
__forceinline__
__device__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f)
{
return x * x * (1.5f * x - 2.5f) + 1.0f;
}
else if (x < 2.0f)
{
return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
}
else
{
return 0.0f;
}
}
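// bicubicCoeff implements the standard cubic convolution kernel with a = -0.5 (Catmull-Rom):
// w(x) = 1.5|x|^3 - 2.5|x|^2 + 1 for |x| <= 1,
// w(x) = -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2 for 1 < |x| < 2,
// w(x) = 0 otherwise.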
__global__ void resizeBicubic(NcvSize32u srcSize,
NcvRect32u srcROI,
NcvSize32u dstSize,
Ncv32u dstStep,
Ncv32f *dst,
NcvRect32u dstROI,
Ncv32f scaleX,
Ncv32f scaleY)
{
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= dstROI.width || iy >= dstROI.height)
{
return;
}
const float dx = 1.0f / srcROI.width;
const float dy = 1.0f / srcROI.height;
float rx = (float) srcROI.x;
float ry = (float) srcROI.y;
float rw = (float) srcROI.width;
float rh = (float) srcROI.height;
float x = scaleX * (float) ix;
float y = scaleY * (float) iy;
// sampling range
// border mode is clamp
float xmin = fmax (ceilf (x - 2.0f), 0.0f);
float xmax = fmin (floorf (x + 2.0f), rw - 1.0f);
float ymin = fmax (ceilf (y - 2.0f), 0.0f);
float ymax = fmin (floorf (y + 2.0f), rh - 1.0f);
// shift data window to match ROI
rx += 0.5f;
ry += 0.5f;
x += rx;
y += ry;
xmin += rx;
xmax += rx;
ymin += ry;
ymax += ry;
float sum = 0.0f;
float wsum = 0.0f;
for (float cy = ymin; cy <= ymax; cy += 1.0f)
{
for (float cx = xmin; cx <= xmax; cx += 1.0f)
{
float xDist = x - cx;
float yDist = y - cy;
float wx = bicubicCoeff (xDist);
float wy = bicubicCoeff (yDist);
wx *= wy;
sum += wx * tex2D (texSrc2D, cx * dx, cy * dy);
wsum += wx;
}
}
dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum;
}
NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
NcvRect32u srcROI,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u nDstStep,
NcvRect32u dstROI,
Ncv32f xFactor,
Ncv32f yFactor,
NppStInterpMode interpolation)
{
NCVStatus status = NPPST_SUCCESS;
ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE);
ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width &&
nDstStep >= sizeof (Ncv32f) * (Ncv32u) dstSize.width,
NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
// TODO: preprocess ROI to prevent out of bounds access
if (interpolation == nppStSupersample)
{
// bind texture
hipBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep);
// invoke kernel
dim3 ctaSize (32, 6);
dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x,
(dstROI.height + ctaSize.y - 1) / ctaSize.y);
hipLaunchKernelGGL(( resizeSuperSample_32f) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor);
}
else if (interpolation == nppStBicubic)
{
texSrc2D.addressMode[0] = hipAddressModeMirror;
texSrc2D.addressMode[1] = hipAddressModeMirror;
texSrc2D.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc <float> ();
hipBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height,
nSrcStep);
dim3 ctaSize (32, 6);
dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x,
(dstSize.height + ctaSize.y - 1) / ctaSize.y);
hipLaunchKernelGGL(( resizeBicubic) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor);
}
else
{
status = NPPST_ERROR;
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return status;
}
|
f03aa116ef7948115792c37459068c1c8bcea6d1.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <vector>
#include <cuda_runtime.h>
#include "NPP_staging.hpp"
texture<Ncv8u, 1, cudaReadModeElementType> tex8u;
texture<Ncv32u, 1, cudaReadModeElementType> tex32u;
texture<uint2, 1, cudaReadModeElementType> tex64u;
//==============================================================================
//
// CUDA streams handling
//
//==============================================================================
static cudaStream_t nppStream = 0;
cudaStream_t nppStGetActiveCUDAstream(void)
{
return nppStream;
}
cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream)
{
cudaStream_t tmp = nppStream;
nppStream = cudaStream;
return tmp;
}
//==============================================================================
//
// BlockScan.cuh
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
template <class T>
inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
{
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
template <class T>
inline __device__ T warpScanExclusive(T idata, volatile T *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <class T, Ncv32u tiNumScanThreads>
inline __device__ T blockScanInclusive(T idata, volatile T *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
T warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
T val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
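// Illustrative usage sketch (not part of the original sources): a kernel that
// turns blockScanInclusive into a per-block inclusive prefix sum. The shared
// buffer must hold 2 * <number of scan threads> elements, matching the layout
// expected by warpScanInclusive above. The kernel and its names are hypothetical.
template <Ncv32u tiNumScanThreads>
__global__ void exampleBlockPrefixSum(const Ncv32u *d_in, Ncv32u *d_out, Ncv32u len)
{
    __shared__ Ncv32u s_scan[tiNumScanThreads * 2];
    Ncv32u idx = blockIdx.x * tiNumScanThreads + threadIdx.x;
    Ncv32u val = (idx < len) ? d_in[idx] : 0;
    // all threads must participate: blockScanInclusive synchronizes internally
    Ncv32u incl = blockScanInclusive<Ncv32u, tiNumScanThreads>(val, s_scan);
    if (idx < len)
    {
        d_out[idx] = incl; // inclusive prefix sum within this block
    }
}
// e.g. exampleBlockPrefixSum<256><<<numBlocks, 256>>>(d_in, d_out, len);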
//==============================================================================
//
// IntegralImage.cu
//
//==============================================================================
const Ncv32u NUM_SCAN_THREADS = 256;
const Ncv32u LOG2_NUM_SCAN_THREADS = 8;
template<class T_in, class T_out>
struct _scanElemOp
{
template<bool tbDoSqr>
static inline __host__ __device__ T_out scanElemOp(T_in elem)
{
return scanElemOp( elem, Int2Type<(int)tbDoSqr>() );
}
private:
template <int v> struct Int2Type { enum { value = v }; };
static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>)
{
return (T_out)elem;
}
static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>)
{
return (T_out)(elem*elem);
}
};
template<class T>
inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs);
template<>
inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs);
}
template<>
inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return d_src[curElemOffs];
}
template<>
inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return d_src[curElemOffs];
}
/**
 * \brief Segmented scan kernel
 *
 * Calculates per-row prefix scans of the input image.
 * Out-of-bounds safe: reads 'srcWidth' elements, writes 'srcWidth+1' elements per row
 *
 * \tparam T_in Type of input image elements
 * \tparam T_out Type of output image elements
 * \tparam tbDoSqr Whether each input pixel is squared before being scanned
 *
 * \param d_src [IN] Source image pointer
 * \param texOffs [IN] Texture offset applied when an 8-bit source is read through tex8u
 * \param srcWidth [IN] Source image width
 * \param srcStride [IN] Source image stride
 * \param d_II [OUT] Output image pointer
 * \param IIstride [IN] Output image stride
 *
 * \return None
 */
template <class T_in, class T_out, bool tbDoSqr>
__global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride,
T_out *d_II, Ncv32u IIstride)
{
//advance pointers to the current line
if (sizeof(T_in) != 1)
{
d_src += srcStride * blockIdx.x;
}
//for an 8-bit source image the data is read through texref tex8u instead
d_II += IIstride * blockIdx.x;
Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS;
Ncv32u offsetX = 0;
__shared__ T_out shmem[NUM_SCAN_THREADS * 2];
__shared__ T_out carryElem;
carryElem = 0;
__syncthreads();
while (numBuckets--)
{
Ncv32u curElemOffs = offsetX + threadIdx.x;
T_out curScanElem;
T_in curElem;
T_out curElemMod;
if (curElemOffs < srcWidth)
{
//load elements
curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs);
}
curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem);
//inclusive scan
curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem);
if (curElemOffs <= srcWidth)
{
//make scan exclusive and write the bucket to the output buffer
d_II[curElemOffs] = carryElem + curScanElem - curElemMod;
offsetX += NUM_SCAN_THREADS;
}
//remember last element for subsequent buckets adjustment
__syncthreads();
if (threadIdx.x == NUM_SCAN_THREADS-1)
{
carryElem += curScanElem;
}
__syncthreads();
}
if (offsetX == srcWidth && !threadIdx.x)
{
d_II[offsetX] = carryElem;
}
}
template <bool tbDoSqr, class T_in, class T_out>
NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride,
T_out *d_dst, Ncv32u dstStride, NcvSize32u roi)
{
cudaChannelFormatDesc cfdTex;
size_t alignmentOffset = 0;
if (sizeof(T_in) == 1)
{
cfdTex = cudaCreateChannelDesc<Ncv8u>();
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
if (alignmentOffset > 0)
{
ncvAssertCUDAReturn(cudaUnbindTexture(tex8u), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
}
}
scanRows
<T_in, T_out, tbDoSqr>
<<<roi.height, NUM_SCAN_THREADS, 0, nppStGetActiveCUDAstream()>>>
(d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment)
{
Ncv32u alignMask = allocatorAlignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u dimBytes = dim * elemTypeSize;
Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask;
Ncv32u PaddedDim = pitch / elemTypeSize;
return PaddedDim;
}
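// Worked example (illustrative, not from the original sources): with a texture
// alignment of 512 bytes, a row of 1000 Ncv32u elements occupies 4000 bytes; the
// pitch is rounded up to 4096 bytes, so getPaddedDimension(1000, 4, 512) returns
// 4096 / 4 = 1024 padded elements per row.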
template <class T_in, class T_out>
NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep,
T_out *d_dst, Ncv32u dstStep, NcvSize32u roi,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) &&
dstStep >= (roi.width + 1) * sizeof(T_out) &&
srcStep % sizeof(T_in) == 0 &&
dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(T_in);
dstStep /= sizeof(T_out);
Ncv32u WidthII = roi.width + 1;
Ncv32u HeightII = roi.height + 1;
Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32);
ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
ncvStat = scanRowsWrapperDevice
<false>
(d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u),
(Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = scanRowsWrapperDevice
<false>
(Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u),
(Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII));
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep,
Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roi.width &&
dstStep >= (roi.width + 1) * sizeof(Ncv64u) &&
dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv64u);
Ncv32u WidthII = roi.width + 1;
Ncv32u HeightII = roi.height + 1;
Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment());
Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment());
Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64;
Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? PaddedHeightII32 : PaddedHeightII64;
NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax);
ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64);
ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
ncvStat = scanRowsWrapperDevice
<true, Ncv8u, Ncv32u>
(d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u),
Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = scanRowsWrapperDevice
<false, Ncv32u, Ncv64u>
(Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u),
d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII));
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width,
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f),
(Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width,
NULL, (roiSize.width+1) * sizeof(Ncv64u),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep,
Ncv32u *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep,
Ncv32f *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep,
Ncv64u *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
Ncv32u *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) &&
dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv32u);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv32u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv32u top = h_dst[(i-1) * dstStep + j];
Ncv32u left = h_dst[i * dstStep + (j - 1)];
Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
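// Illustrative usage sketch (not part of the original sources): computing a
// host-side integral image of a small 8-bit tile. The buffer sizes follow the
// (width + 1) x (height + 1) output layout used above; exampleHostIntegral is a
// hypothetical name and is never called by the library code.
static void exampleHostIntegral()
{
    const Ncv32u w = 4, h = 3;
    Ncv8u src[w * h];
    for (Ncv32u i = 0; i < w * h; i++) src[i] = 1;   // constant all-ones image
    Ncv32u dst[(w + 1) * (h + 1)];                   // integral image output
    NcvSize32u roi(w, h);
    NCVStatus st = nppiStIntegral_8u32u_C1R_host(src, w,                       // src step in bytes
                                                 dst, (w + 1) * sizeof(Ncv32u), // dst step in bytes
                                                 roi);
    // On success dst[h * (w + 1) + w] == w * h, the sum over the whole tile.
    (void)st;
}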
NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep,
Ncv32f *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) &&
srcStep % sizeof(Ncv32f) == 0 &&
dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(Ncv32f);
dstStep /= sizeof(Ncv32f);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv32f));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0.0f;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv32f top = h_dst[(i-1) * dstStep + j];
Ncv32f left = h_dst[i * dstStep + (j - 1)];
Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
Ncv64u *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) &&
dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv64u);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv64u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv64u top = h_dst[(i-1) * dstStep + j];
Ncv64u left = h_dst[i * dstStep + (j - 1)];
Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem*elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
//==============================================================================
//
// Decimate.cu
//
//==============================================================================
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32;
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8;
template<class T, NcvBool tbCacheTexture>
__device__ T getElem_Decimate(Ncv32u x, T *d_src);
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src)
{
return tex1Dfetch(tex32u, x);
}
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src)
{
return d_src[x];
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src)
{
return d_src[x];
}
template <class T, NcvBool tbCacheTexture>
__global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep,
NcvSize32u dstRoi, Ncv32u scale)
{
int curX = blockIdx.x * blockDim.x + threadIdx.x;
int curY = blockIdx.y * blockDim.y + threadIdx.y;
if (curX >= dstRoi.width || curY >= dstRoi.height)
{
return;
}
d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src);
}
template <class T>
static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep,
T *d_dst, Ncv32u dstStep,
NcvSize32u srcRoi, Ncv32u scale,
NcvBool readThruTexture)
{
ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP);
srcStep /= sizeof(T);
dstStep /= sizeof(T);
NcvSize32u dstRoi;
dstRoi.width = srcRoi.width / scale;
dstRoi.height = srcRoi.height / scale;
dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X,
(dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
if (!readThruTexture)
{
decimate_C1R
<T, false>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcStep, d_dst, dstStep, dstRoi, scale);
}
else
{
cudaChannelFormatDesc cfdTexSrc;
if (sizeof(T) == sizeof(Ncv32u))
{
cfdTexSrc = cudaCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
}
else
{
cfdTexSrc = cudaCreateChannelDesc<uint2>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
}
decimate_C1R
<T, true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcStep, d_dst, dstStep, dstRoi, scale);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
template <class T>
static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep,
T *h_dst, Ncv32u dstStep,
NcvSize32u srcRoi, Ncv32u scale)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI);
ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) &&
srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(T);
dstStep /= sizeof(T);
NcvSize32u dstRoi;
dstRoi.width = srcRoi.width / scale;
dstRoi.height = srcRoi.height / scale;
for (Ncv32u i=0; i<dstRoi.height; i++)
{
for (Ncv32u j=0; j<dstRoi.width; j++)
{
h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale];
}
}
return NPPST_SUCCESS;
}
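// Illustrative usage sketch (not part of the original sources): nearest-neighbour
// decimation of an 8x8 Ncv32u tile by a factor of 2 using the host wrapper above.
// exampleHostDecimate is a hypothetical name and is never called by the library code.
static void exampleHostDecimate()
{
    const Ncv32u w = 8, h = 8, scale = 2;
    Ncv32u src[w * h];
    Ncv32u dst[(w / scale) * (h / scale)];
    for (Ncv32u i = 0; i < w * h; i++) src[i] = i;
    NcvSize32u srcRoi(w, h);
    decimateWrapperHost<Ncv32u>(src, w * sizeof(Ncv32u),
                                dst, (w / scale) * sizeof(Ncv32u),
                                srcRoi, scale);
    // dst[i * (w / scale) + j] now equals src[(i * scale) * w + j * scale].
}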
#define implementNppDecimate(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \
{ \
return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, \
srcRoi, scale, readThruTexture); \
}
#define implementNppDecimateHost(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale) \
{ \
return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, \
srcRoi, scale); \
}
implementNppDecimate(32, u)
implementNppDecimate(32, s)
implementNppDecimate(32, f)
implementNppDecimate(64, u)
implementNppDecimate(64, s)
implementNppDecimate(64, f)
implementNppDecimateHost(32, u)
implementNppDecimateHost(32, s)
implementNppDecimateHost(32, f)
implementNppDecimateHost(64, u)
implementNppDecimateHost(64, s)
implementNppDecimateHost(64, f)
//==============================================================================
//
// RectStdDev.cu
//
//==============================================================================
const Ncv32u NUM_RECTSTDDEV_THREADS = 128;
template <NcvBool tbCacheTexture>
__device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum)
{
if (tbCacheTexture)
{
return tex1Dfetch(tex32u, x);
}
else
{
return d_sum[x];
}
}
template <NcvBool tbCacheTexture>
__device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum)
{
if (tbCacheTexture)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
else
{
return d_sqsum[x];
}
}
template <NcvBool tbCacheTexture>
__global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
Ncv64u *d_sqsum, Ncv32u sqsumStep,
Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea)
{
Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
if (x_offs >= roi.width)
{
return;
}
Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;
//OPT: try swapping order (could change cache hit/miss ratio)
Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;
Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;
Ncv32f mean = sum_val * invRectArea;
//////////////////////////////////////////////////////////////////////////
// sqsum_val_res = sqsum_val / rectArea
//////////////////////////////////////////////////////////////////////////
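// Descriptive note: a direct Ncv64u -> Ncv32f conversion would lose low-order
// bits once sqsum_val exceeds 2^24, so the value is split into a float "high"
// part and the exact integer remainder; each part is scaled by invRectArea
// separately and the results are summed to preserve precision.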
Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
sqsum_val_1 *= invRectArea;
sqsum_val_4 *= invRectArea;
Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;
//////////////////////////////////////////////////////////////////////////
// variance = sqsum_val_res - mean * mean
//////////////////////////////////////////////////////////////////////////
#if defined DISABLE_MAD_SELECTIVELY
Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean);
#else
Ncv32f variance = sqsum_val_res - mean * mean;
#endif
//////////////////////////////////////////////////////////////////////////
// stddev = sqrtf(variance)
//////////////////////////////////////////////////////////////////////////
//Ncv32f stddev = sqrtf(variance);
Ncv32f stddev = __fsqrt_rn(variance);
d_norm[blockIdx.y * normStep + x_offs] = stddev;
}
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
Ncv64u *d_sqsum, Ncv32u sqsumStep,
Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect,
Ncv32f scaleArea, NcvBool readThruTexture)
{
ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
sumStep % sizeof(Ncv32u) == 0 &&
sqsumStep % sizeof(Ncv64u) == 0 &&
normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
sumStep /= sizeof(Ncv32u);
sqsumStep /= sizeof(Ncv64u);
normStep /= sizeof(Ncv32f);
Ncv32f rectArea = rect.width * rect.height * scaleArea;
Ncv32f invRectArea = 1.0f / rectArea;
dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
dim3 block(NUM_RECTSTDDEV_THREADS);
if (!readThruTexture)
{
rectStdDev_32f_C1R
<false>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
}
else
{
cudaChannelFormatDesc cfdTexSrc;
cudaChannelFormatDesc cfdTexSqr;
cfdTexSrc = cudaCreateChannelDesc<Ncv32u>();
cfdTexSqr = cudaCreateChannelDesc<uint2>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
rectStdDev_32f_C1R
<true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep,
Ncv64u *h_sqsum, Ncv32u sqsumStep,
Ncv32f *h_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect,
Ncv32f scaleArea)
{
ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
sumStep % sizeof(Ncv32u) == 0 &&
sqsumStep % sizeof(Ncv64u) == 0 &&
normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
sumStep /= sizeof(Ncv32u);
sqsumStep /= sizeof(Ncv64u);
normStep /= sizeof(Ncv32f);
Ncv32f rectArea = rect.width * rect.height * scaleArea;
Ncv32f invRectArea = 1.0f / rectArea;
for (Ncv32u i=0; i<roi.height; i++)
{
for (Ncv32u j=0; j<roi.width; j++)
{
Ncv32u sum_offset = i * sumStep + j;
Ncv32u sqsum_offset = i * sqsumStep + j;
Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x];
Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x];
Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width];
Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width];
Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl;
Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x];
Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x];
Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width];
Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width];
Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl);
Ncv64f mean = sum_val * invRectArea;
Ncv64f sqsum_val_2 = sqsum_val / rectArea;
Ncv64f variance = sqsum_val_2 - mean * mean;
h_norm[i * normStep + j] = (Ncv32f)sqrt(variance);
}
}
return NPPST_SUCCESS;
}
//==============================================================================
//
// Transpose.cu
//
//==============================================================================
const Ncv32u TRANSPOSE_TILE_DIM = 16;
const Ncv32u TRANSPOSE_BLOCK_ROWS = 16;
/**
 * \brief Matrix transpose kernel
 *
 * Calculates transpose of the input image
 * \see TRANSPOSE_TILE_DIM
 *
 * \tparam T Type of image elements
 *
 * \param d_src [IN] Source image pointer
 * \param srcStride [IN] Source image stride
 * \param d_dst [OUT] Output image pointer
 * \param dstStride [IN] Output image stride
 * \param srcRoi [IN] Source image ROI
 *
 * \return None
 */
template <class T>
__global__ void transpose(T *d_src, Ncv32u srcStride,
T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
__shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
Ncv32u blockIdx_x, blockIdx_y;
// do diagonal reordering
if (gridDim.x == gridDim.y)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else
{
Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;
Ncv32u index_gmem = xIndex + yIndex * srcStride;
if (xIndex < srcRoi.width)
{
for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if (yIndex + i < srcRoi.height)
{
tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride];
}
}
}
__syncthreads();
xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_gmem = xIndex + yIndex * dstStride;
if (xIndex < srcRoi.height)
{
for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if (yIndex + i < srcRoi.width)
{
d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
}
template <class T>
NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride,
T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
dstStride >= srcRoi.height * sizeof(T) &&
srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStride /= sizeof(T);
dstStride /= sizeof(T);
dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM,
(srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM);
dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);
transpose
<T>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcStride, d_dst, dstStride, srcRoi);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
template <class T>
static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride,
T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
dstStride >= srcRoi.height * sizeof(T) &&
srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStride /= sizeof(T);
dstStride /= sizeof(T);
for (Ncv32u i=0; i<srcRoi.height; i++)
{
for (Ncv32u j=0; j<srcRoi.width; j++)
{
h_dst[j*dstStride+i] = h_src[i*srcStride + j];
}
}
return NPPST_SUCCESS;
}
#define implementNppTranspose(bit, typ) \
NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \
{ \
return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, srcRoi); \
}
#define implementNppTransposeHost(bit, typ) \
NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi) \
{ \
return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, srcRoi); \
}
implementNppTranspose(32,u)
implementNppTranspose(32,s)
implementNppTranspose(32,f)
implementNppTranspose(64,u)
implementNppTranspose(64,s)
implementNppTranspose(64,f)
implementNppTransposeHost(32,u)
implementNppTransposeHost(32,s)
implementNppTransposeHost(32,f)
implementNppTransposeHost(64,u)
implementNppTransposeHost(64,s)
implementNppTransposeHost(64,f)
NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep,
void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}
NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep,
void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}
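// Illustrative usage sketch (not part of the original sources): transposing a
// small 32-bit matrix on the host with the wrapper generated above.
// exampleHostTranspose is a hypothetical name and is never called by the library code.
static void exampleHostTranspose()
{
    const Ncv32u w = 3, h = 2;
    Ncv32u src[h * w] = {1, 2, 3,
                         4, 5, 6};   // 2 rows x 3 columns
    Ncv32u dst[w * h];               // 3 rows x 2 columns after the transpose
    NcvSize32u srcRoi(w, h);
    nppiStTranspose_32u_C1R_host(src, w * sizeof(Ncv32u),
                                 dst, h * sizeof(Ncv32u),
                                 srcRoi);
    // dst now holds {1, 4,  2, 5,  3, 6}
}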
//==============================================================================
//
// Compact.cu
//
//==============================================================================
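// Overview (descriptive note, derived from the kernels below): compaction runs in
// three passes. Pass 1 scans the keep/remove predicate within each block, producing
// per-element exclusive offsets plus one partial sum per block; the per-block sums
// are then scanned hierarchically with the same kernel. Pass 2 propagates the scanned
// block sums back down so the offsets become global. Pass 3 scatters the surviving
// elements to their offsets and records the final element count.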
const Ncv32u NUM_REMOVE_THREADS = 256;
template <bool bRemove, bool bWritePartial>
__global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_offsets, Ncv32u *d_blockSums,
Ncv32u elemRemove)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn > srcLen + blockDim.x)
{
return;
}
__shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2];
Ncv32u scanElem = 0;
if (elemAddrIn < srcLen)
{
if (bRemove)
{
scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0;
}
else
{
scanElem = d_src[elemAddrIn];
}
}
Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem);
__syncthreads();
if (elemAddrIn < srcLen)
{
if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial)
{
d_blockSums[blockId] = localScanInc;
}
if (bRemove)
{
d_offsets[elemAddrIn] = localScanInc - scanElem;
}
else
{
d_src[elemAddrIn] = localScanInc - scanElem;
}
}
}
__global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn >= srcLen)
{
return;
}
__shared__ Ncv32u valOffs;
valOffs = d_blockSums[blockId];
__syncthreads();
d_offsets[elemAddrIn] += valOffs;
}
__global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_offsets, Ncv32u *d_dst,
Ncv32u elemRemove, Ncv32u *dstLenValue)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn >= srcLen)
{
return;
}
Ncv32u elem = d_src[elemAddrIn];
Ncv32u elemAddrOut = d_offsets[elemAddrIn];
if (elem != elemRemove)
{
d_dst[elemAddrOut] = elem;
}
if (elemAddrIn == srcLen-1)
{
if (elem != elemRemove)
{
*dstLenValue = elemAddrOut + 1;
}
else
{
*dstLenValue = elemAddrOut;
}
}
}
NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_dst, Ncv32u *dstLenPinned,
Ncv32u elemRemove,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
if (dstLenPinned != NULL)
{
*dstLenPinned = 0;
}
return NPPST_SUCCESS;
}
std::vector<Ncv32u> partSumNums;
std::vector<Ncv32u> partSumOffsets;
Ncv32u partSumLastNum = srcLen;
Ncv32u partSumLastOffs = 0;
do
{
partSumNums.push_back(partSumLastNum);
partSumOffsets.push_back(partSumLastOffs);
Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u),
gpuAllocator.alignment()) / sizeof(Ncv32u);
partSumLastOffs += curPartSumAlignedLength;
partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS;
}
while (partSumLastNum>1);
partSumNums.push_back(partSumLastNum);
partSumOffsets.push_back(partSumLastOffs);
NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1);
ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1);
ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
dim3 block(NUM_REMOVE_THREADS);
//calculate zero-level partial sums used for index calculation
if (partSumNums.size() > 2)
{
dim3 grid(partSumNums[1]);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
removePass1Scan
<true, true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcLen,
d_hierSums.ptr(),
d_hierSums.ptr() + partSumOffsets[1],
elemRemove);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
//calculate hierarchical partial sums
for (Ncv32u i=1; i<partSumNums.size()-1; i++)
{
dim3 grid_partial(partSumNums[i+1]);
if (grid_partial.x > 65535)
{
grid_partial.y = (grid_partial.x + 65534) / 65535;
grid_partial.x = 65535;
}
if (grid_partial.x != 1)
{
removePass1Scan
<false, true>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
d_hierSums.ptr() + partSumOffsets[i+1],
0);
}
else
{
removePass1Scan
<false, false>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
NULL,
0);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
//adjust hierarchical partial sums
for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--)
{
dim3 grid_local(partSumNums[i+1]);
if (grid_local.x > 65535)
{
grid_local.y = (grid_local.x + 65534) / 65535;
grid_local.x = 65535;
}
removePass2Adjust
<<<grid_local, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i], partSumNums[i],
d_hierSums.ptr() + partSumOffsets[i+1]);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
}
else
{
dim3 grid_local(partSumNums[1]);
removePass1Scan
<true, false>
<<<grid_local, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcLen,
d_hierSums.ptr(),
NULL, elemRemove);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
//compact source vector using indices
dim3 grid(partSumNums[1]);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
removePass3Compact
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcLen, d_hierSums.ptr(), d_dst,
elemRemove, d_numDstElements.ptr());
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
//get number of dst elements
if (dstLenPinned != NULL)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u),
cudaMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
}
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
*pBufsize = 0;
return NPPST_SUCCESS;
}
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE,
gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}
NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_dst, Ncv32u *p_dstLen,
Ncv32u elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove,
gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
Ncv32s *d_dst, Ncv32u *p_dstLen,
Ncv32s elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
*(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
}
#if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__ > 4
typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a;
#else
typedef Ncv32u Ncv32u_a;
#endif
NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
Ncv32f *d_dst, Ncv32u *p_dstLen,
Ncv32f elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
*(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp);
}
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
if (dstLen != NULL)
{
*dstLen = 0;
}
return NPPST_SUCCESS;
}
Ncv32u dstIndex = 0;
for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++)
{
if (h_src[srcIndex] != elemRemove)
{
h_dst[dstIndex++] = h_src[srcIndex];
}
}
if (dstLen != NULL)
{
*dstLen = dstIndex;
}
return NPPST_SUCCESS;
}
NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
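// Illustrative usage sketch (not part of the original sources): removing a sentinel
// value from a host vector with nppsStCompact_32u_host. exampleHostCompact is a
// hypothetical name and is never called by the library code.
static void exampleHostCompact()
{
    Ncv32u src[6] = {5, 0xC001C0DE, 7, 0xC001C0DE, 9, 11};
    Ncv32u dst[6];
    Ncv32u dstLen = 0;
    nppsStCompact_32u_host(src, 6, dst, &dstLen, 0xC001C0DE);
    // dstLen == 4 and dst holds {5, 7, 9, 11}
}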
//==============================================================================
//
// Filter.cu
//
//==============================================================================
texture <float, 1, cudaReadModeElementType> texSrc;
texture <float, 1, cudaReadModeElementType> texKernel;
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
int i,
int w)
{
if (i < 0) i = 1 - i;
if (i >= w) i = w + w - i - 1;
return tex1Dfetch (texSrc, rowOffset + i);
}
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
const int rowStep,
int j,
int h)
{
if (j < 0) j = 1 - j;
if (j >= h) j = h + h - j - 1;
return tex1Dfetch (texSrc, offset + j * rowStep);
}
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u roi,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
// position within ROI
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= roi.width || iy >= roi.height)
{
return;
}
const int p = nKernelSize - nAnchor - 1;
const int j = roi.y + iy;
const int rowOffset = j * srcStep + roi.x;
float sum = 0.0f;
for (int m = 0; m < nKernelSize; ++m)
{
sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width)
* tex1Dfetch (texKernel, m);
}
pDst[iy * dstStep + ix] = sum * multiplier;
}
__global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u roi,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= roi.width || iy >= roi.height)
{
return;
}
const int p = nKernelSize - nAnchor - 1;
const int i = roi.x + ix;
const int offset = i + roi.y * srcStep;
float sum = 0.0f;
for (int m = 0; m < nKernelSize; ++m)
{
sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height)
* tex1Dfetch (texKernel, m);
}
pDst[ix + iy * dstStep] = sum * multiplier;
}
NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u nDstStep,
NcvRect32u oROI,
NppStBorderType borderType,
const Ncv32f *pKernel,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
ncvAssertReturn (pSrc != NULL &&
pDst != NULL &&
pKernel != NULL, NCV_NULL_PTR);
ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
dstSize.width * sizeof (Ncv32f) <= nDstStep &&
oROI.width * sizeof (Ncv32f) <= nSrcStep &&
oROI.width * sizeof (Ncv32f) <= nDstStep &&
nSrcStep % sizeof (Ncv32f) == 0 &&
nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
// adjust ROI size to be within source image
if (oROI.x + oROI.width > srcSize.width)
{
oROI.width = srcSize.width - oROI.x;
}
if (oROI.y + oROI.height > srcSize.height)
{
oROI.height = srcSize.height - oROI.y;
}
cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> ();
texSrc.normalized = false;
texKernel.normalized = false;
cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));
dim3 ctaSize (32, 6);
dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
(oROI.height + ctaSize.y - 1) / ctaSize.y);
switch (borderType)
{
case nppStBorderNone:
return NPPST_ERROR;
case nppStBorderClamp:
return NPPST_ERROR;
case nppStBorderWrap:
return NPPST_ERROR;
case nppStBorderMirror:
FilterRowBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
(srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
break;
default:
return NPPST_ERROR;
}
return NPPST_SUCCESS;
}
NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u nDstStep,
NcvRect32u oROI,
NppStBorderType borderType,
const Ncv32f *pKernel,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
ncvAssertReturn (pSrc != NULL &&
pDst != NULL &&
pKernel != NULL, NCV_NULL_PTR);
ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
dstSize.width * sizeof (Ncv32f) <= nDstStep &&
oROI.width * sizeof (Ncv32f) <= nSrcStep &&
oROI.width * sizeof (Ncv32f) <= nDstStep &&
nSrcStep % sizeof (Ncv32f) == 0 &&
nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
// adjust ROI size to be within source image
if (oROI.x + oROI.width > srcSize.width)
{
oROI.width = srcSize.width - oROI.x;
}
if (oROI.y + oROI.height > srcSize.height)
{
oROI.height = srcSize.height - oROI.y;
}
cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> ();
texSrc.normalized = false;
texKernel.normalized = false;
cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));
dim3 ctaSize (32, 6);
dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
(oROI.height + ctaSize.y - 1) / ctaSize.y);
switch (borderType)
{
case nppStBorderClamp:
return NPPST_ERROR;
case nppStBorderWrap:
return NPPST_ERROR;
case nppStBorderMirror:
FilterColumnBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
(srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
break;
default:
return NPPST_ERROR;
}
return NPPST_SUCCESS;
}
//==============================================================================
//
// FrameInterpolate.cu
//
//==============================================================================
inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom)
{
return (num + denom - 1)/denom;
}
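// For example, iDivUp(1920, 32) == 60 (exact) and iDivUp(1000, 32) == 32
// (31 full blocks of 32 plus one partial block); the surplus threads in the
// partial block are discarded by the bounds checks inside the kernels below.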
texture<float, 2, cudaReadModeElementType> tex_src1;
texture<float, 2, cudaReadModeElementType> tex_src0;
__global__ void BlendFramesKernel(const float *u, const float *v, // forward flow
const float *ur, const float *vr, // backward flow
const float *o0, const float *o1, // coverage masks
int w, int h, int s,
float theta, float *out)
{
const int ix = threadIdx.x + blockDim.x * blockIdx.x;
const int iy = threadIdx.y + blockDim.y * blockIdx.y;
const int pos = ix + s * iy;
if (ix >= w || iy >= h) return;
float _u = u[pos];
float _v = v[pos];
float _ur = ur[pos];
float _vr = vr[pos];
float x = (float)ix + 0.5f;
float y = (float)iy + 0.5f;
bool b0 = o0[pos] > 1e-4f;
bool b1 = o1[pos] > 1e-4f;
if (b0 && b1)
{
// pixel is visible on both frames
out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) +
tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta;
}
else if (b0)
{
// visible on the first frame only
out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta);
}
else
{
// visible on the second frame only
out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta));
}
}
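// In formula form (descriptive note), with theta in [0, 1] the kernel above computes
//   out(x) = (1 - theta) * I0(x - theta * u(x)) + theta * I1(x + (1 - theta) * u(x))
// where a pixel is covered in both frames, and falls back to the single covered
// frame, warped by the forward or backward flow respectively, otherwise.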
NCVStatus BlendFrames(const Ncv32f *src0,
const Ncv32f *src1,
const Ncv32f *ufi,
const Ncv32f *vfi,
const Ncv32f *ubi,
const Ncv32f *vbi,
const Ncv32f *o1,
const Ncv32f *o2,
Ncv32u width,
Ncv32u height,
Ncv32u stride,
Ncv32f theta,
Ncv32f *out)
{
tex_src1.addressMode[0] = cudaAddressModeClamp;
tex_src1.addressMode[1] = cudaAddressModeClamp;
tex_src1.filterMode = cudaFilterModeLinear;
tex_src1.normalized = false;
tex_src0.addressMode[0] = cudaAddressModeClamp;
tex_src0.addressMode[1] = cudaAddressModeClamp;
tex_src0.filterMode = cudaFilterModeLinear;
tex_src0.normalized = false;
cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> ();
const Ncv32u pitch = stride * sizeof (float);
ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
dim3 threads (32, 4);
dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));
BlendFramesKernel<<<blocks, threads, 0, nppStGetActiveCUDAstream ()>>>
(ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
Ncv32u nStep,
Ncv32u *hpSize)
{
NCVStatus status = NPPST_ERROR;
status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize);
return status;
}
NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
// check state validity
ncvAssertReturn (pState->pSrcFrame0 != 0 &&
pState->pSrcFrame1 != 0 &&
pState->pFU != 0 &&
pState->pFV != 0 &&
pState->pBU != 0 &&
pState->pBV != 0 &&
pState->pNewFrame != 0 &&
pState->ppBuffers[0] != 0 &&
pState->ppBuffers[1] != 0 &&
pState->ppBuffers[2] != 0 &&
pState->ppBuffers[3] != 0 &&
pState->ppBuffers[4] != 0 &&
pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (pState->size.width > 0 &&
pState->size.height > 0, NPPST_ERROR);
ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
pState->nStep > 0 &&
pState->nStep % sizeof (Ncv32f) == 0,
NPPST_INVALID_STEP);
// change notation
Ncv32f *cov0 = pState->ppBuffers[0];
Ncv32f *cov1 = pState->ppBuffers[1];
Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
Ncv32f *bwdV = pState->ppBuffers[5]; // backward v
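// Scheme: both flow fields are forward-warped to the intermediate time position
// (pos for the forward flow, 1 - pos for the backward flow). The PSF2x2 warp also
// accumulates splat weights into cov0/cov1, which BlendFrames later thresholds as
// per-pixel coverage masks to decide which frame(s) each output pixel samples from.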
// warp flow
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU,
pState->size,
pState->nStep,
pState->pFU,
pState->pFV,
pState->nStep,
cov0,
pState->pos,
fwdU) );
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV,
pState->size,
pState->nStep,
pState->pFU,
pState->pFV,
pState->nStep,
cov0,
pState->pos,
fwdV) );
// warp backward flow
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU,
pState->size,
pState->nStep,
pState->pBU,
pState->pBV,
pState->nStep,
cov1,
1.0f - pState->pos,
bwdU) );
ncvAssertReturnNcvStat (
nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV,
pState->size,
pState->nStep,
pState->pBU,
pState->pBV,
pState->nStep,
cov1,
1.0f - pState->pos,
bwdV) );
// interpolate frame
ncvAssertReturnNcvStat (
BlendFrames (pState->pSrcFrame0,
pState->pSrcFrame1,
fwdU,
fwdV,
bwdU,
bwdV,
cov0,
cov1,
pState->size.width,
pState->size.height,
pState->nStep / sizeof (Ncv32f),
pState->pos,
pState->pNewFrame) );
return NPPST_SUCCESS;
}
//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================
#if __CUDA_ARCH__ < 200
// FP32 atomic add
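// Emulated with a compare-and-swap loop on pre-Fermi devices (sm < 2.0), which lack
// a native float atomicAdd: the float is reinterpreted as int for the CAS and
// converted back afterwards.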
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
{
float old = *addr, assumed;
do {
assumed = old;
old = int_as_float(__iAtomicCAS((int*)addr,
float_as_int(assumed),
float_as_int(val+assumed)));
} while( assumed!=old );
return old;
}
#else
#define _atomicAdd atomicAdd
#endif
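// 2x2 point-spread forward warp: each thread splats its source pixel into the four
// destination pixels surrounding the warped position using bilinear weights and
// atomic adds; the same weights are accumulated in normalization_factor so that
// NormalizeKernel can renormalize the result in a later pass.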
__global__ void ForwardWarpKernel_PSF2x2(const float *u,
const float *v,
const float *src,
const int w,
const int h,
const int flow_stride,
const int image_stride,
const float time_scale,
float *normalization_factor,
float *dst)
{
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= h || j >= w) return;
int flow_row_offset = i * flow_stride;
int image_row_offset = i * image_stride;
//bottom left corner of a target pixel
float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;
// pixel containing bottom left corner
float px;
float py;
float dx = modff (cx, &px);
float dy = modff (cy, &py);
// target pixel integer coords
int tx;
int ty;
tx = (int) px;
ty = (int) py;
float value = src[image_row_offset + j];
float weight;
// fill pixel containing bottom right corner
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = dx * dy;
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
// fill pixel containing bottom left corner
tx -= 1;
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = (1.0f - dx) * dy;
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
// fill pixel containing upper left corner
ty -= 1;
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = (1.0f - dx) * (1.0f - dy);
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
// fill pixel containing upper right corner
tx += 1;
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
weight = dx * (1.0f - dy);
_atomicAdd (dst + ty * image_stride + tx, value * weight);
_atomicAdd (normalization_factor + ty * image_stride + tx, weight);
}
}
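// 1x1 point-spread forward warp: each thread adds its source pixel to the single
// nearest destination pixel (rounded warped position), so no normalization buffer
// is needed.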
__global__ void ForwardWarpKernel_PSF1x1(const float *u,
const float *v,
const float *src,
const int w,
const int h,
const int flow_stride,
const int image_stride,
const float time_scale,
float *dst)
{
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= h || j >= w) return;
int flow_row_offset = i * flow_stride;
int image_row_offset = i * image_stride;
float u_ = u[flow_row_offset + j];
float v_ = v[flow_row_offset + j];
//bottom left corner of target pixel
float cx = u_ * time_scale + (float)j + 1.0f;
float cy = v_ * time_scale + (float)i + 1.0f;
// pixel containing bottom left corner
int tx = __float2int_rn (cx);
int ty = __float2int_rn (cy);
float value = src[image_row_offset + j];
// fill pixel
if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
{
_atomicAdd (dst + ty * image_stride + tx, value);
}
}
__global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= h || j >= w) return;
const int pos = i * s + j;
float scale = normalization_factor[pos];
float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale);
image[pos] *= invScale;
}
__global__ void MemsetKernel(const float value, int w, int h, float *image)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= h || j >= w) return;
const int pos = i * w + j;
image[pos] = value;
}
NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize)
{
ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep,
NPPST_INVALID_STEP);
*hpSize = nSrcStep * srcSize.height;
return NPPST_SUCCESS;
}
// does not require normalization
NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
const Ncv32f *pU,
const Ncv32f *pV,
Ncv32u nVFStep,
Ncv32f timeScale,
Ncv32f *pDst)
{
ncvAssertReturn (pSrc != NULL &&
pU != NULL &&
pV != NULL &&
pDst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
srcSize.width * sizeof (Ncv32f) <= nVFStep,
NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u vfStep = nVFStep / sizeof (Ncv32f);
dim3 ctaSize (32, 6);
dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
ForwardWarpKernel_PSF1x1 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
(pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
const Ncv32f *pU,
const Ncv32f *pV,
Ncv32u nVFStep,
Ncv32f *pBuffer,
Ncv32f timeScale,
Ncv32f *pDst)
{
ncvAssertReturn (pSrc != NULL &&
pU != NULL &&
pV != NULL &&
pDst != NULL &&
pBuffer != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u vfStep = nVFStep / sizeof(Ncv32f);
dim3 ctaSize(32, 6);
dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
MemsetKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
(0, srcSize.width, srcSize.height, pBuffer);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
ForwardWarpKernel_PSF2x2 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
(pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
NormalizeKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
(pBuffer, srcSize.width, srcSize.height, srcStep, pDst);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
//==============================================================================
//
// Resize.cu
//
//==============================================================================
texture <float, 2, cudaReadModeElementType> texSrc2D;
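// processLine accumulates one row of the supersampling footprint: texels strictly
// between ixmin and ixmax contribute weight 1, while the partially covered first and
// last texels are weighted by their fractional coverage; it returns the weighted
// average of the row.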
__forceinline__
__device__ float processLine(int spos,
float xmin,
float xmax,
int ixmin,
int ixmax,
float fxmin,
float cxmax)
{
// first element
float wsum = 1.0f - xmin + fxmin;
float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin);
spos++;
for (int ix = ixmin + 1; ix < ixmax; ++ix)
{
sum += tex1Dfetch(texSrc, spos);
spos++;
wsum += 1.0f;
}
sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax);
wsum += cxmax - xmax;
return sum / wsum;
}
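// Supersampling resize: each destination pixel averages the source texels within a
// window of +/- scaleX by +/- scaleY around its mapped source position, weighting
// border texels by their fractional coverage (rows are handled by processLine above).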
__global__ void resizeSuperSample_32f(NcvSize32u srcSize,
Ncv32u srcStep,
NcvRect32u srcROI,
Ncv32f *dst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u dstROI,
Ncv32f scaleX,
Ncv32f scaleY)
{
// position within dst ROI
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= dstROI.width || iy >= dstROI.height)
{
return;
}
float rw = (float) srcROI.width;
float rh = (float) srcROI.height;
// source position
float x = scaleX * (float) ix;
float y = scaleY * (float) iy;
// x sampling range
float xBegin = fmax (x - scaleX, 0.0f);
float xEnd = fmin (x + scaleX, rw - 1.0f);
// y sampling range
float yBegin = fmax (y - scaleY, 0.0f);
float yEnd = fmin (y + scaleY, rh - 1.0f);
// x range of source samples
float floorXBegin = floorf (xBegin);
float ceilXEnd = ceilf (xEnd);
int iXBegin = srcROI.x + (int) floorXBegin;
int iXEnd = srcROI.x + (int) ceilXEnd;
// y range of source samples
float floorYBegin = floorf (yBegin);
float ceilYEnd = ceilf (yEnd);
int iYBegin = srcROI.y + (int) floorYBegin;
int iYEnd = srcROI.y + (int) ceilYEnd;
// first row
int pos = iYBegin * srcStep + iXBegin;
float wsum = 1.0f - yBegin + floorYBegin;
float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
ceilXEnd) * (1.0f - yBegin + floorYBegin);
pos += srcStep;
for (int iy = iYBegin + 1; iy < iYEnd; ++iy)
{
sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
ceilXEnd);
pos += srcStep;
wsum += 1.0f;
}
sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
ceilXEnd) * (ceilYEnd - yEnd);
wsum += ceilYEnd - yEnd;
sum /= wsum;
dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum;
}
// bicubic interpolation
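// bicubicCoeff evaluates the piecewise-cubic interpolation weight as a function of
// distance (the Catmull-Rom kernel, a = -0.5); it is non-zero only for |x| < 2.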
__forceinline__
__device__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f)
{
return x * x * (1.5f * x - 2.5f) + 1.0f;
}
else if (x < 2.0f)
{
return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
}
else
{
return 0.0f;
}
}
__global__ void resizeBicubic(NcvSize32u srcSize,
NcvRect32u srcROI,
NcvSize32u dstSize,
Ncv32u dstStep,
Ncv32f *dst,
NcvRect32u dstROI,
Ncv32f scaleX,
Ncv32f scaleY)
{
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= dstROI.width || iy >= dstROI.height)
{
return;
}
const float dx = 1.0f / srcROI.width;
const float dy = 1.0f / srcROI.height;
float rx = (float) srcROI.x;
float ry = (float) srcROI.y;
float rw = (float) srcROI.width;
float rh = (float) srcROI.height;
float x = scaleX * (float) ix;
float y = scaleY * (float) iy;
// sampling range
// border mode is clamp
float xmin = fmax (ceilf (x - 2.0f), 0.0f);
float xmax = fmin (floorf (x + 2.0f), rw - 1.0f);
float ymin = fmax (ceilf (y - 2.0f), 0.0f);
float ymax = fmin (floorf (y + 2.0f), rh - 1.0f);
// shift data window to match ROI
rx += 0.5f;
ry += 0.5f;
x += rx;
y += ry;
xmin += rx;
xmax += rx;
ymin += ry;
ymax += ry;
float sum = 0.0f;
float wsum = 0.0f;
for (float cy = ymin; cy <= ymax; cy += 1.0f)
{
for (float cx = xmin; cx <= xmax; cx += 1.0f)
{
float xDist = x - cx;
float yDist = y - cy;
float wx = bicubicCoeff (xDist);
float wy = bicubicCoeff (yDist);
wx *= wy;
sum += wx * tex2D (texSrc2D, cx * dx, cy * dy);
wsum += wx;
}
}
dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum;
}
NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc,
NcvSize32u srcSize,
Ncv32u nSrcStep,
NcvRect32u srcROI,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u nDstStep,
NcvRect32u dstROI,
Ncv32f xFactor,
Ncv32f yFactor,
NppStInterpMode interpolation)
{
NCVStatus status = NPPST_SUCCESS;
ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE);
ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width &&
nDstStep >= sizeof (Ncv32f) * (Ncv32u) dstSize.width,
NPPST_INVALID_STEP);
Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
// TODO: preprocess ROI to prevent out of bounds access
if (interpolation == nppStSupersample)
{
// bind texture
cudaBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep);
// invoke kernel
dim3 ctaSize (32, 6);
dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x,
(dstROI.height + ctaSize.y - 1) / ctaSize.y);
resizeSuperSample_32f <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
(srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor);
}
else if (interpolation == nppStBicubic)
{
texSrc2D.addressMode[0] = cudaAddressModeMirror;
texSrc2D.addressMode[1] = cudaAddressModeMirror;
texSrc2D.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> ();
cudaBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height,
nSrcStep);
dim3 ctaSize (32, 6);
dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x,
(dstSize.height + ctaSize.y - 1) / ctaSize.y);
resizeBicubic <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
(srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor);
}
else
{
status = NPPST_ERROR;
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return status;
}
|
497eee17c4403900a9af56fc1272f390226042ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__
void coarse_psd_matrix(const double* window, const double* psd, double* output_real, double* output_imag, int stride, int length, int output_size){
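// Each thread fills one entry (ii, jj) of a Hermitian matrix: the sum over frequency
// bins kk of psd[kk] * stride times the product of the complex window value at lag
// x_idx - kk with the conjugate of the window value at lag y_idx - kk (lags wrap
// modulo length; the window stores interleaved real/imaginary parts). Threads with
// ii < jj exit early; the symmetric entry (jj, ii) is written with the conjugate.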
unsigned int ii, jj, kk, x_idx, y_idx;
unsigned int x_idx_2, y_idx_2, len;
double wxr, wxi, wyr, wyi;
double total_real, total_imag;
int tid, tid_2;
unsigned long out_size;
tid = blockDim.x * blockIdx.x + threadIdx.x;
len = length;
out_size = output_size;
ii = tid % out_size;
jj = tid / out_size;
if (ii < jj) {
return;
} else {
tid_2 = ii * out_size + jj;
}
x_idx = ii * stride;
y_idx = jj * stride;
total_real = 0;
total_imag = 0;
for (kk = 0; kk < len; kk++){
if (x_idx >= kk)
x_idx_2 = x_idx - kk;
else
x_idx_2 = x_idx + len - kk;
if (y_idx >= kk)
y_idx_2 = y_idx - kk;
else
y_idx_2 = y_idx + len - kk;
wxr = window[2 * x_idx_2];
wxi = window[2 * x_idx_2 + 1];
wyr = window[2 * y_idx_2];
wyi = window[2 * y_idx_2 + 1];
total_real += psd[kk] * stride * (wxr * wyr + wxi * wyi);
total_imag += psd[kk] * stride * (wxi * wyr - wxr * wyi);
}
output_real[tid] = total_real;
output_imag[tid] = total_imag;
if (ii > jj) {
output_real[tid_2] = total_real;
output_imag[tid_2] = -total_imag;
}
}
|
497eee17c4403900a9af56fc1272f390226042ac.cu
|
extern "C" __global__
void coarse_psd_matrix(const double* window, const double* psd, double* output_real, double* output_imag, int stride, int length, int output_size){
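// Each thread fills one entry (ii, jj) of a Hermitian matrix: the sum over frequency
// bins kk of psd[kk] * stride times the product of the complex window value at lag
// x_idx - kk with the conjugate of the window value at lag y_idx - kk (lags wrap
// modulo length; the window stores interleaved real/imaginary parts). Threads with
// ii < jj exit early; the symmetric entry (jj, ii) is written with the conjugate.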
unsigned int ii, jj, kk, x_idx, y_idx;
unsigned int x_idx_2, y_idx_2, len;
double wxr, wxi, wyr, wyi;
double total_real, total_imag;
int tid, tid_2;
unsigned long out_size;
tid = blockDim.x * blockIdx.x + threadIdx.x;
len = length;
out_size = output_size;
ii = tid % out_size;
jj = tid / out_size;
if (ii < jj) {
return;
} else {
tid_2 = ii * out_size + jj;
}
x_idx = ii * stride;
y_idx = jj * stride;
total_real = 0;
total_imag = 0;
for (kk = 0; kk < len; kk++){
if (x_idx >= kk)
x_idx_2 = x_idx - kk;
else
x_idx_2 = x_idx + len - kk;
if (y_idx >= kk)
y_idx_2 = y_idx - kk;
else
y_idx_2 = y_idx + len - kk;
wxr = window[2 * x_idx_2];
wxi = window[2 * x_idx_2 + 1];
wyr = window[2 * y_idx_2];
wyi = window[2 * y_idx_2 + 1];
total_real += psd[kk] * stride * (wxr * wyr + wxi * wyi);
total_imag += psd[kk] * stride * (wxi * wyr - wxr * wyi);
}
output_real[tid] = total_real;
output_imag[tid] = total_imag;
if (ii > jj) {
output_real[tid_2] = total_real;
output_imag[tid_2] = -total_imag;
}
}
|
5ff3574f29e7e2d12eda2c3794c5683a9757d3d3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.h"
#include "math.h"
__device__ float norm_d(float a[3])
{
return sqrt( (a[0]*a[0]) + (a[1]*a[1]) + (a[2]*a[2]));
}
__device__ void cross_d(float a[3], float b[3], float temp[3])
{
temp[0] = a[1]*b[2] - a[2]*b[1];
temp[1] = a[2]*b[0] - a[0]*b[2];
temp[2] = a[0]*b[1] - a[1]*b[0];
}
__device__ float darAreaElemento(int i, int *faces_d, float *vertex_d, int nNodos, int nCeldas) {
float a[3], b[3], c[3], v1[3], v2[3], temp[3];
int A, B, C;
A=FACES_D(i, 0);
B=FACES_D(i, 1);
C=FACES_D(i, 2);
a[0]=VERTEX_D(A, 0);
a[1]=VERTEX_D(A, 1);
a[2]=VERTEX_D(A, 2);
b[0]=VERTEX_D(B, 0);
b[1]=VERTEX_D(B, 1);
b[2]=VERTEX_D(B, 2);
c[0]=VERTEX_D(C, 0);
c[1]=VERTEX_D(C, 1);
c[2]=VERTEX_D(C, 2);
v1[0] = b[0] - a[0];
v1[1] = b[1] - a[1];
v1[2] = b[2] - a[2];
v2[0] = c[0] - a[0];
v2[1] = c[1] - a[1];
v2[2] = c[2] - a[2];
cross_d(v1, v2, temp);
return norm_d(temp)/2.0;
}
__global__ void calcular_cambio_area(int nCeldas, int nNodos, int *faces_d, float *vertex_d, int *faces_ref_d, float *vertex_ref_d, float *area_d) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < nCeldas) {
float a = darAreaElemento(i, faces_d, vertex_d, nNodos, nCeldas);
float b = darAreaElemento(i, faces_ref_d, vertex_ref_d, nNodos, nCeldas);
area_d[i] = a - b;
}
}
void calcular_cambio_area_wrapper(int nCeldas, int nNodos, int *faces_d, float *vertex_d, int *faces_ref_d, float *vertex_ref_d, float *area_d) {
//X*Y*Z = 9261;
//Maximum number of threads per block: 1024
dim3 grid_size;
grid_size.x = nCeldas/1024 + 1;
dim3 block_size;
// 1000 threads per blocks
block_size.x = 1024;
//Launch kernel
hipLaunchKernelGGL(( calcular_cambio_area), dim3(grid_size), dim3(block_size), 0, 0, nCeldas, nNodos, faces_d, vertex_d, faces_ref_d, vertex_ref_d, area_d);
}
|
5ff3574f29e7e2d12eda2c3794c5683a9757d3d3.cu
|
#include "helper.h"
#include "math.h"
__device__ float norm_d(float a[3])
{
return sqrt( (a[0]*a[0]) + (a[1]*a[1]) + (a[2]*a[2]));
}
__device__ void cross_d(float a[3], float b[3], float temp[3])
{
temp[0] = a[1]*b[2] - a[2]*b[1];
temp[1] = a[2]*b[0] - a[0]*b[2];
temp[2] = a[0]*b[1] - a[1]*b[0];
}
__device__ float darAreaElemento(int i, int *faces_d, float *vertex_d, int nNodos, int nCeldas) {
float a[3], b[3], c[3], v1[3], v2[3], temp[3];
int A, B, C;
A=FACES_D(i, 0);
B=FACES_D(i, 1);
C=FACES_D(i, 2);
a[0]=VERTEX_D(A, 0);
a[1]=VERTEX_D(A, 1);
a[2]=VERTEX_D(A, 2);
b[0]=VERTEX_D(B, 0);
b[1]=VERTEX_D(B, 1);
b[2]=VERTEX_D(B, 2);
c[0]=VERTEX_D(C, 0);
c[1]=VERTEX_D(C, 1);
c[2]=VERTEX_D(C, 2);
v1[0] = b[0] - a[0];
v1[1] = b[1] - a[1];
v1[2] = b[2] - a[2];
v2[0] = c[0] - a[0];
v2[1] = c[1] - a[1];
v2[2] = c[2] - a[2];
cross_d(v1, v2, temp);
return norm_d(temp)/2.0;
}
__global__ void calcular_cambio_area(int nCeldas, int nNodos, int *faces_d, float *vertex_d, int *faces_ref_d, float *vertex_ref_d, float *area_d) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < nCeldas) {
float a = darAreaElemento(i, faces_d, vertex_d, nNodos, nCeldas);
float b = darAreaElemento(i, faces_ref_d, vertex_ref_d, nNodos, nCeldas);
area_d[i] = a - b;
}
}
void calcular_cambio_area_wrapper(int nCeldas, int nNodos, int *faces_d, float *vertex_d, int *faces_ref_d, float *vertex_ref_d, float *area_d) {
//X*Y*Z = 9261;
//Maximum number of threads per block: 1024
dim3 grid_size;
grid_size.x = nCeldas/1024 + 1;
dim3 block_size;
// 1000 threads per blocks
block_size.x = 1024;
//Launch kernel
calcular_cambio_area<<<grid_size, block_size>>>(nCeldas, nNodos, faces_d, vertex_d, faces_ref_d, vertex_ref_d, area_d);
}
|
6f1e1b07ab5c1d27a175aa16eb4958b7abc7519a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* special helper function for computing centroids */
// What is the strategy here?
//
// We have two output arrays (for the weighted coordinates) and one input array
// with the values. For each pixel, we compute the x and y coordinates from the
// thread index, and store the product of these with the pixel value.
// Later we will compute the sum...
//
// This could probably be composed as two vmul's followed by a vsum?
#define KERNEL_FUNC_QUALIFIER __global__
#define CENT_KERNEL( typ ) \
\
KERNEL_FUNC_QUALIFIER void typ##_slow_cent_helper \
( /*std_type *x_array, dim3 inc1, std_type *y_array, dim3 inc2, \
std_type *input, dim3 inc3, dim3 len*/ DECLARE_KERN_ARGS_SLEN_3 ) \
\
{ \
/*dim3 index;*/ \
/*uint32_t offset1, offset2, offset3;*/ \
DECL_INDICES_3 \
std_type p; \
\
/*index.x = blockIdx.x * blockDim.x + threadIdx.x;*/ \
/*index.y = blockIdx.y * blockDim.y + threadIdx.y;*/ \
SET_INDICES_3 \
\
/*offset1 = index.y * inc1.x + index.x;*/ \
/*offset2 = index.y * inc2.x + index.x;*/ \
/*offset3 = index.y * inc3.x + index.x;*/ \
\
/*p = *( c + offset3);*/ \
p = slow_src2; /* third arg, no first source */ \
slow_dst1 = p * index3.d5_dim[1]; /* x */ \
slow_dst2 = p * index3.d5_dim[2]; /* y */ \
/* *(a+offset1) = p * index.x; */ \
/* *(b+offset2) = p * index.y; */ \
}
#define CK( c ) CENT_KERNEL( c )
CK( type_code )
|
6f1e1b07ab5c1d27a175aa16eb4958b7abc7519a.cu
|
/* special helper function for computing centroids */
// What is the strategy here?
//
// We have two output arrays (for the weighted coordinates) and one input array
// with the values. For each pixel, we compute the x and y coordinates from the
// thread index, and store the product of these with the pixel value.
// Later we will compute the sum...
//
// This could probably be composed as two vmul's followed by a vsum?
#define KERNEL_FUNC_QUALIFIER __global__
#define CENT_KERNEL( typ ) \
\
KERNEL_FUNC_QUALIFIER void typ##_slow_cent_helper \
( /*std_type *x_array, dim3 inc1, std_type *y_array, dim3 inc2, \
std_type *input, dim3 inc3, dim3 len*/ DECLARE_KERN_ARGS_SLEN_3 ) \
\
{ \
/*dim3 index;*/ \
/*uint32_t offset1, offset2, offset3;*/ \
DECL_INDICES_3 \
std_type p; \
\
/*index.x = blockIdx.x * blockDim.x + threadIdx.x;*/ \
/*index.y = blockIdx.y * blockDim.y + threadIdx.y;*/ \
SET_INDICES_3 \
\
/*offset1 = index.y * inc1.x + index.x;*/ \
/*offset2 = index.y * inc2.x + index.x;*/ \
/*offset3 = index.y * inc3.x + index.x;*/ \
\
/*p = *( c + offset3);*/ \
p = slow_src2; /* third arg, no first source */ \
slow_dst1 = p * index3.d5_dim[1]; /* x */ \
slow_dst2 = p * index3.d5_dim[2]; /* y */ \
/* *(a+offset1) = p * index.x; */ \
/* *(b+offset2) = p * index.y; */ \
}
#define CK( c ) CENT_KERNEL( c )
CK( type_code )
|
8c4557c42043913c41516bfe920f7b5195ee64da.hip
|
// !!! This is a file automatically generated by hipify!!!
// ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include "histogram.cuh"
#include <iostream>
#include <hip/hip_runtime.h>
#include "helper.cuh"
__global__
void computeHistogramAtomicKernel(int *histogram, float *imgIn, int nbins, int w, int h, int nc)
{
// TODO (13.1) update histogram using atomic operations
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int idx = y*w + x;
if (x >= w || y >= h) return;
float intensity = 0.f;
if (nc == 3) intensity = 0.3f*imgIn[0*h*w + idx] + 0.59f*imgIn[1*h*w + idx] + 0.11f*imgIn[2*h*w + idx];
else if (nc == 1) intensity = imgIn[idx];
else return;
int bidx = min((int)(intensity * nbins), nbins - 1); // clamp so intensity == 1.0f still maps to the last bin
atomicAdd(&histogram[bidx], 1);
}
__global__
void computeHistogramAtomicSharedMemKernel(int *histogram, float *imgIn, int w, int h, int nc)
{
// TODO (13.3) update histogram using atomic operations on shared memory
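// Note: this kernel assumes blockDim.x == number of histogram bins, so each thread
// both processes one pixel and owns one shared/global bin for the zero-out and the
// final merge below.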
extern __shared__ int shared_histogram[]; // integer counts; the buffer is allocated as block.x * sizeof(int)
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= h*w) return;
// zero-out the histogram
shared_histogram[threadIdx.x] = 0;
__syncthreads();
// compute intensity and update the histogram
float intensity = 0.f;
if (nc == 3) intensity = 0.3f*imgIn[0*h*w + idx] + 0.59f*imgIn[1*h*w + idx] + 0.11f*imgIn[2*h*w + idx];
else if (nc == 1) intensity = imgIn[idx];
else return;
int bidx = min((int)(intensity * blockDim.x), (int)blockDim.x - 1); // one shared bin per thread; clamp intensity == 1.0f
atomicAdd(&shared_histogram[bidx], 1);
__syncthreads();
// update the global histogram
atomicAdd(&histogram[threadIdx.x], shared_histogram[threadIdx.x]);
}
void computeHistogramCuda(int *histogram, float *imgIn, int nbins, int w, int h, int nc)
{
if (!histogram)
{
std::cerr << "histogram not allocated!" << std::endl;
return;
}
// calculate block and grid size
dim3 block(32, 32, 1); // TODO (13.1) specify suitable block size
dim3 grid = computeGrid2D(block, w, h); // TODO (13.1) compute grid dimensions
// run cuda kernel
// TODO (13.1) execute kernel for histogram update using atomic operations
hipLaunchKernelGGL(( computeHistogramAtomicKernel) , dim3(grid), dim3(block), 0, 0, histogram, imgIn, nbins, w, h, nc);
// check for errors
// TODO (13.1)
CUDA_CHECK;
}
void computeHistogramCudaShared(int *histogram, float *imgIn, int nbins, int w, int h, int nc)
{
if (!histogram)
{
std::cerr << "histogram not allocated!" << std::endl;
return;
}
// calculate block and grid size
dim3 block(nbins, 1, 1); // TODO (13.3) specify suitable block size
dim3 grid = computeGrid1D(block, w*h); // TODO (13.3) compute grid dimensions
int smBytes = block.x*sizeof(int);
// run cuda kernel
// TODO (13.3) execute kernel for histogram update using atomic operations on shared memory
hipLaunchKernelGGL(( computeHistogramAtomicSharedMemKernel) , dim3(grid), dim3(block), smBytes, 0, histogram, imgIn, w, h, nc);
// check for errors
// TODO (13.3)
CUDA_CHECK;
}
|
8c4557c42043913c41516bfe920f7b5195ee64da.cu
|
// ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include "histogram.cuh"
#include <iostream>
#include <cuda_runtime.h>
#include "helper.cuh"
__global__
void computeHistogramAtomicKernel(int *histogram, float *imgIn, int nbins, int w, int h, int nc)
{
// TODO (13.1) update histogram using atomic operations
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int idx = y*w + x;
if (x >= w || y >= h) return;
float intensity = 0.f;
if (nc == 3) intensity = 0.3f*imgIn[0*h*w + idx] + 0.59f*imgIn[1*h*w + idx] + 0.11f*imgIn[2*h*w + idx];
else if (nc == 1) intensity = imgIn[idx];
else return;
int bidx = min((int)(intensity * nbins), nbins - 1); // clamp so intensity == 1.0f still maps to the last bin
atomicAdd(&histogram[bidx], 1);
}
__global__
void computeHistogramAtomicSharedMemKernel(int *histogram, float *imgIn, int w, int h, int nc)
{
// TODO (13.3) update histogram using atomic operations on shared memory
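// Note: this kernel assumes blockDim.x == number of histogram bins, so each thread
// both processes one pixel and owns one shared/global bin for the zero-out and the
// final merge below.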
extern __shared__ int shared_histogram[]; // integer counts; the buffer is allocated as block.x * sizeof(int)
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= h*w) return;
// zero-out the histogram
shared_histogram[threadIdx.x] = 0;
__syncthreads();
// compute intensity and update the histogram
float intensity = 0.f;
if (nc == 3) intensity = 0.3f*imgIn[0*h*w + idx] + 0.59f*imgIn[1*h*w + idx] + 0.11f*imgIn[2*h*w + idx];
else if (nc == 1) intensity = imgIn[idx];
else return;
int bidx = min((int)(intensity * blockDim.x), (int)blockDim.x - 1); // one shared bin per thread; clamp intensity == 1.0f
atomicAdd(&shared_histogram[bidx], 1);
__syncthreads();
// update the global histogram
atomicAdd(&histogram[threadIdx.x], shared_histogram[threadIdx.x]);
}
void computeHistogramCuda(int *histogram, float *imgIn, int nbins, int w, int h, int nc)
{
if (!histogram)
{
std::cerr << "histogram not allocated!" << std::endl;
return;
}
// calculate block and grid size
dim3 block(32, 32, 1); // TODO (13.1) specify suitable block size
dim3 grid = computeGrid2D(block, w, h); // TODO (13.1) compute grid dimensions
// run cuda kernel
// TODO (13.1) execute kernel for histogram update using atomic operations
computeHistogramAtomicKernel <<<grid, block>>> (histogram, imgIn, nbins, w, h, nc);
// check for errors
// TODO (13.1)
CUDA_CHECK;
}
void computeHistogramCudaShared(int *histogram, float *imgIn, int nbins, int w, int h, int nc)
{
if (!histogram)
{
std::cerr << "histogram not allocated!" << std::endl;
return;
}
// calculate block and grid size
dim3 block(nbins, 1, 1); // TODO (13.3) specify suitable block size
dim3 grid = computeGrid1D(block, w*h); // TODO (13.3) compute grid dimensions
int smBytes = block.x*sizeof(int);
// run cuda kernel
// TODO (13.3) execute kernel for histogram update using atomic operations on shared memory
computeHistogramAtomicSharedMemKernel <<<grid, block, smBytes>>> (histogram, imgIn, w, h, nc);
// check for errors
// TODO (13.3)
CUDA_CHECK;
}
|
7bc6c99469d323c1b65548efaef9a669197fc7e0.hip
|
// !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=128 --gridDim=16 --no-inline
//assert\(false\)
#include <stdio.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
typedef void(*funcType)(float*);
__device__ void a(float *v)
{
printf ("funcA with p%f = %f", *v, *v);
}
__device__ void b(float *v)
{
printf ("funcB with p%f = %f", *v, *v);
}
__device__ void c(float *v)
{
printf ("funcC with p%f = %f", *v, *v);
}
__device__ void d(float *v)
{
printf ("funcD with p%f = %f", *v, *v);
}
__device__ void e(float *v)
{
printf ("funcE with p%f = %f", *v, *v);
}
__global__ void should_fail(float * __restrict p1, float * __restrict p2, float * __restrict p3, float * __restrict p4, float * __restrict p5, int x, int y)
{
funcType fp = a;
switch(x) {
case 1:
fp = &a;
break;
case 2:
fp = &b;
break;
case 3:
fp = &c;
break;
case 4:
fp = &d;
break;
default:
fp = &e;
break;
}
switch(y) {
case 1:
fp(p1);
break;
case 2:
fp(p2);
break;
case 3:
fp(p3);
break;
case 4:
fp(p4);
break;
default:
fp(p5);
break;
}
assert(1);
}
int main (){
float p1, p2, p3, p4, p5;
float *dev_p1, *dev_p2, *dev_p3, *dev_p4, *dev_p5;
p1 = 1; p2 = 2; p3 = 3; p4 = 4; p5 = 5;
hipMalloc((void**)&dev_p1, sizeof(float));
hipMalloc((void**)&dev_p2, sizeof(float));
hipMalloc((void**)&dev_p3, sizeof(float));
hipMalloc((void**)&dev_p4, sizeof(float));
hipMalloc((void**)&dev_p5, sizeof(float));
hipMemcpy(dev_p1,&p1, sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_p2,&p2, sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_p3,&p3, sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_p4,&p4, sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_p5,&p5, sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( should_fail) , dim3(1),dim3(2), 0, 0, dev_p1, dev_p2, dev_p3, dev_p4, dev_p5, 4, 4);
//ESBMC_verify_kernel_f(should_fail,1,2,dev_p1, dev_p2, dev_p3, dev_p4, dev_p5, 4, 4);
hipMemcpy(&p1,dev_p1,sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(&p2,dev_p2,sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(&p3,dev_p3,sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(&p4,dev_p4,sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(&p5,dev_p5,sizeof(float),hipMemcpyDeviceToHost);
hipFree(dev_p1);
hipFree(dev_p2);
hipFree(dev_p3);
hipFree(dev_p4);
hipFree(dev_p5);
return 0;
}
|
7bc6c99469d323c1b65548efaef9a669197fc7e0.cu
|
//xfail:BOOGIE_ERROR
//--blockDim=128 --gridDim=16 --no-inline
//assert\(false\)
#include <stdio.h>
#include <assert.h>
#include "cuda.h"
#include "cuda_runtime_api.h"
typedef void(*funcType)(float*);
__device__ void a(float *v)
{
printf ("funcA with p%f = %f", *v, *v);
}
__device__ void b(float *v)
{
printf ("funcB with p%f = %f", *v, *v);
}
__device__ void c(float *v)
{
printf ("funcC with p%f = %f", *v, *v);
}
__device__ void d(float *v)
{
printf ("funcD with p%f = %f", *v, *v);
}
__device__ void e(float *v)
{
printf ("funcE with p%f = %f", *v, *v);
}
__global__ void should_fail(float * __restrict p1, float * __restrict p2, float * __restrict p3, float * __restrict p4, float * __restrict p5, int x, int y)
{
funcType fp = a;
switch(x) {
case 1:
fp = &a;
break;
case 2:
fp = &b;
break;
case 3:
fp = &c;
break;
case 4:
fp = &d;
break;
default:
fp = &e;
break;
}
switch(y) {
case 1:
fp(p1);
break;
case 2:
fp(p2);
break;
case 3:
fp(p3);
break;
case 4:
fp(p4);
break;
default:
fp(p5);
break;
}
assert(1);
}
int main (){
float p1, p2, p3, p4, p5;
float *dev_p1, *dev_p2, *dev_p3, *dev_p4, *dev_p5;
p1 = 1; p2 = 2; p3 = 3; p4 = 4; p5 = 5;
cudaMalloc((void**)&dev_p1, sizeof(float));
cudaMalloc((void**)&dev_p2, sizeof(float));
cudaMalloc((void**)&dev_p3, sizeof(float));
cudaMalloc((void**)&dev_p4, sizeof(float));
cudaMalloc((void**)&dev_p5, sizeof(float));
cudaMemcpy(dev_p1,&p1, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p2,&p2, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p3,&p3, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p4,&p4, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p5,&p5, sizeof(float),cudaMemcpyHostToDevice);
should_fail <<<1,2>>>(dev_p1, dev_p2, dev_p3, dev_p4, dev_p5, 4, 4);
//ESBMC_verify_kernel_f(should_fail,1,2,dev_p1, dev_p2, dev_p3, dev_p4, dev_p5, 4, 4);
cudaMemcpy(&p1,dev_p1,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p2,dev_p2,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p3,dev_p3,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p4,dev_p4,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p5,dev_p5,sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(dev_p1);
cudaFree(dev_p2);
cudaFree(dev_p3);
cudaFree(dev_p4);
cudaFree(dev_p5);
return 0;
}
|
2194de6db799a35217e1f6ca6d42ca1a4d6e6ef6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <getopt.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
struct dwt {
char * srcFilename;
char * outFilename;
unsigned char *srcImg;
int pixWidth;
int pixHeight;
int components;
int dwtLvls;
};
int getImg(char * srcFilename, unsigned char *srcImg, int inputSize)
{
// printf("Loading ipnput: %s\n", srcFilename);
char *path = "/home/vkz4947/gpgpu-sim_simulations/benchmarks/src/cuda/rodinia/3.1/data/dwt2d/";
char *newSrc = NULL;
if((newSrc = (char *)malloc(strlen(srcFilename)+strlen(path)+1)) != NULL)
{
newSrc[0] = '\0';
strcat(newSrc, path);
strcat(newSrc, srcFilename);
srcFilename= newSrc;
}
printf("Loading ipnput: %s\n", srcFilename);
//srcFilename = strcat("../../data/dwt2d/",srcFilename);
//read image
int i = open(srcFilename, O_RDONLY, 0644);
if (i == -1) {
error(0,errno,"cannot access %s", srcFilename);
return -1;
}
int ret = read(i, srcImg, inputSize);
printf("precteno %d, inputsize %d\n", ret, inputSize);
close(i);
return 0;
}
void usage() {
printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
-d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
-c, --components\t\tnumber of color components, default 3\n\
-b, --depth\t\t\tbit depth, default 8\n\
-l, --level\t\t\tDWT level, default 3\n\
-D, --device\t\t\tcuda device\n\
-f, --forward\t\t\tforward transform\n\
-r, --reverse\t\t\treverse transform\n\
-9, --97\t\t\t9/7 transform\n\
-5, --53\t\t\t5/3 transform\n\
-w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual)
{
int componentSize = d->pixWidth*d->pixHeight*sizeof(T);
T *c_r_out, *backup ;
hipMalloc((void**)&c_r_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r_out, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&backup, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(backup, 0, componentSize);
cudaCheckError("Memset device memory");
if (d->components == 3) {
/* Alloc two more buffers for G and B */
T *c_g_out, *c_b_out;
hipMalloc((void**)&c_g_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_g_out, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_b_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_b_out, 0, componentSize);
cudaCheckError("Memset device memory");
/* Load components */
T *c_r, *c_g, *c_b;
hipMalloc((void**)&c_r, componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_g, componentSize); //< G, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_g, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_b, componentSize); //< B, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_b, 0, componentSize);
cudaCheckError("Memset device memory");
rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
/* Compute DWT and always store into file */
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// -------test----------
// T *h_r_out=(T*)malloc(componentSize);
// hipMemcpy(h_r_out, c_g_out, componentSize, hipMemcpyDeviceToHost);
// int ii;
// for(ii=0;ii<componentSize/sizeof(T);ii++) {
// fprintf(stderr, "%d ", h_r_out[ii]);
// if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
// }
// -------test----------
/* Store DWT to file */
#ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".r");
writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".g");
writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".b");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
}
#endif
hipFree(c_r);
cudaCheckError("Cuda free");
hipFree(c_g);
cudaCheckError("Cuda free");
hipFree(c_b);
cudaCheckError("Cuda free");
hipFree(c_g_out);
cudaCheckError("Cuda free");
hipFree(c_b_out);
cudaCheckError("Cuda free");
}
else if (d->components == 1) {
//Load component
T *c_r;
hipMalloc((void**)&(c_r), componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
// Compute DWT
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// Store DWT to file
// #ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".out");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".lin.out");
}
// #endif
hipFree(c_r);
cudaCheckError("Cuda free");
}
hipFree(c_r_out);
cudaCheckError("Cuda free device");
hipFree(backup);
cudaCheckError("Cuda free device");
}
int main(int argc, char **argv)
{
int optindex = 0;
int ch; // getopt_long() returns int, so the -1 check needs an int, not a char
struct option longopts[] = {
{"dimension", required_argument, 0, 'd'}, //dimensions of src img
{"components", required_argument, 0, 'c'}, //numger of components of src img
{"depth", required_argument, 0, 'b'}, //bit depth of src img
{"level", required_argument, 0, 'l'}, //level of dwt
{"device", required_argument, 0, 'D'}, //cuda device
{"forward", no_argument, 0, 'f'}, //forward transform
{"reverse", no_argument, 0, 'r'}, //reverse transform
{"97", no_argument, 0, '9'}, //9/7 transform
{"53", no_argument, 0, '5' }, //5/3transform
{"write-visual",no_argument, 0, 'w' }, //write output (subbands) in visual (tiled) order instead of linear
{"help", no_argument, 0, 'h'}
};
int pixWidth = 0; //<real pixWidth
int pixHeight = 0; //<real pixHeight
int compCount = 3; //number of components; 3 for RGB or YUV, 4 for RGBA
int bitDepth = 8;
int dwtLvls = 3; //default number of DWT levels
int device = 0;
int forward = 1; //forward transform
int dwt97 = 1; //1=dwt9/7, 0=dwt5/3 transform
int writeVisual = 0; //write output (subbands) in visual (tiled) order instead of linear
char * pos;
while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts, &optindex)) != -1) {
switch (ch) {
case 'd':
pixWidth = atoi(optarg);
pos = strstr(optarg, "x");
if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
usage();
return -1;
}
pixHeight = atoi(pos+1);
break;
case 'c':
compCount = atoi(optarg);
break;
case 'b':
bitDepth = atoi(optarg);
break;
case 'l':
dwtLvls = atoi(optarg);
break;
case 'D':
device = atoi(optarg);
break;
case 'f':
forward = 1;
break;
case 'r':
forward = 0;
break;
case '9':
dwt97 = 1;
break;
case '5':
dwt97 = 0;
break;
case 'w':
writeVisual = 1;
break;
case 'h':
usage();
return 0;
case '?':
return -1;
default :
usage();
return -1;
}
}
argc -= optind;
argv += optind;
if (argc == 0) { // at least one filename is expected
printf("Please supply src file name\n");
usage();
return -1;
}
if (pixWidth <= 0 || pixHeight <=0) {
printf("Wrong or missing dimensions\n");
usage();
return -1;
}
if (forward == 0) {
writeVisual = 0; //do not write visual when RDWT
}
// device init
int devCount;
hipGetDeviceCount(&devCount);
cudaCheckError("Get device count");
if (devCount == 0) {
printf("No CUDA enabled device\n");
return -1;
}
if (device < 0 || device > devCount -1) {
printf("Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
device, 0, devCount -1);
return -1;
}
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, device);
cudaCheckError("Get device properties");
if (devProp.major < 1) {
printf("Device %d does not support CUDA\n", device);
return -1;
}
printf("Using device %d: %s\n", device, devProp.name);
hipSetDevice(device);
cudaCheckError("Set selected device");
struct dwt *d;
d = (struct dwt *)malloc(sizeof(struct dwt));
d->srcImg = NULL;
d->pixWidth = pixWidth;
d->pixHeight = pixHeight;
d->components = compCount;
d->dwtLvls = dwtLvls;
// file names
d->srcFilename = (char *)malloc(strlen(argv[0]) + 1); // +1 for the terminating '\0'
strcpy(d->srcFilename, argv[0]);
if (argc == 1) { // only one filename supplied
d->outFilename = (char *)malloc(strlen(d->srcFilename)+5); // room for ".dwt" plus the terminating '\0'
strcpy(d->outFilename, d->srcFilename);
strcpy(d->outFilename+strlen(d->srcFilename), ".dwt");
} else {
d->outFilename = strdup(argv[1]);
}
//Input review
printf("Source file:\t\t%s\n", d->srcFilename);
printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
printf(" Components count:\t%d\n", compCount);
printf(" Bit depth:\t\t%d\n", bitDepth);
printf(" DWT levels:\t\t%d\n", dwtLvls);
printf(" Forward transform:\t%d\n", forward);
printf(" 9/7 transform:\t\t%d\n", dwt97);
//data sizes
int inputSize = pixWidth*pixHeight*compCount; //<amount of data (in bytes) to process
//load img source image
hipHostMalloc((void **)&d->srcImg, inputSize);
cudaCheckError("Alloc host memory");
if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
return -1;
/* DWT */
if (forward == 1) {
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
else { // reverse
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
//writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
//writeComponent(g_wave_cuda, 512000, ".g");
//writeComponent(g_cuda, componentSize, ".g");
//writeComponent(b_wave_cuda, componentSize, ".b");
hipHostFree(d->srcImg);
cudaCheckError("Cuda free host");
return 0;
}
|
2194de6db799a35217e1f6ca6d42ca1a4d6e6ef6.cu
|
/*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <getopt.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
struct dwt {
char * srcFilename;
char * outFilename;
unsigned char *srcImg;
int pixWidth;
int pixHeight;
int components;
int dwtLvls;
};
int getImg(char * srcFilename, unsigned char *srcImg, int inputSize)
{
// printf("Loading ipnput: %s\n", srcFilename);
char *path = "/home/vkz4947/gpgpu-sim_simulations/benchmarks/src/cuda/rodinia/3.1/data/dwt2d/";
char *newSrc = NULL;
if((newSrc = (char *)malloc(strlen(srcFilename)+strlen(path)+1)) != NULL)
{
newSrc[0] = '\0';
strcat(newSrc, path);
strcat(newSrc, srcFilename);
srcFilename= newSrc;
}
printf("Loading ipnput: %s\n", srcFilename);
//srcFilename = strcat("../../data/dwt2d/",srcFilename);
//read image
int i = open(srcFilename, O_RDONLY, 0644);
if (i == -1) {
error(0,errno,"cannot access %s", srcFilename);
return -1;
}
int ret = read(i, srcImg, inputSize);
printf("precteno %d, inputsize %d\n", ret, inputSize);
close(i);
return 0;
}
void usage() {
printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
-d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
-c, --components\t\tnumber of color components, default 3\n\
-b, --depth\t\t\tbit depth, default 8\n\
-l, --level\t\t\tDWT level, default 3\n\
-D, --device\t\t\tcuda device\n\
-f, --forward\t\t\tforward transform\n\
-r, --reverse\t\t\treverse transform\n\
-9, --97\t\t\t9/7 transform\n\
-5, --53\t\t\t5/3 transform\n\
-w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual)
{
int componentSize = d->pixWidth*d->pixHeight*sizeof(T);
T *c_r_out, *backup ;
cudaMalloc((void**)&c_r_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r_out, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&backup, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(backup, 0, componentSize);
cudaCheckError("Memset device memory");
if (d->components == 3) {
/* Alloc two more buffers for G and B */
T *c_g_out, *c_b_out;
cudaMalloc((void**)&c_g_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_g_out, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_b_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_b_out, 0, componentSize);
cudaCheckError("Memset device memory");
/* Load components */
T *c_r, *c_g, *c_b;
cudaMalloc((void**)&c_r, componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_g, componentSize); //< G, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_g, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_b, componentSize); //< B, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_b, 0, componentSize);
cudaCheckError("Memset device memory");
rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
/* Compute DWT and always store into file */
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// -------test----------
// T *h_r_out=(T*)malloc(componentSize);
// cudaMemcpy(h_r_out, c_g_out, componentSize, cudaMemcpyDeviceToHost);
// int ii;
// for(ii=0;ii<componentSize/sizeof(T);ii++) {
// fprintf(stderr, "%d ", h_r_out[ii]);
// if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
// }
// -------test----------
/* Store DWT to file */
#ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".r");
writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".g");
writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".b");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
}
#endif
cudaFree(c_r);
cudaCheckError("Cuda free");
cudaFree(c_g);
cudaCheckError("Cuda free");
cudaFree(c_b);
cudaCheckError("Cuda free");
cudaFree(c_g_out);
cudaCheckError("Cuda free");
cudaFree(c_b_out);
cudaCheckError("Cuda free");
}
else if (d->components == 1) {
//Load component
T *c_r;
cudaMalloc((void**)&(c_r), componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
// Compute DWT
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// Store DWT to file
// #ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".out");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".lin.out");
}
// #endif
cudaFree(c_r);
cudaCheckError("Cuda free");
}
cudaFree(c_r_out);
cudaCheckError("Cuda free device");
cudaFree(backup);
cudaCheckError("Cuda free device");
}
int main(int argc, char **argv)
{
int optindex = 0;
int ch; // getopt_long() returns int, so the -1 check needs an int, not a char
struct option longopts[] = {
{"dimension", required_argument, 0, 'd'}, //dimensions of src img
{"components", required_argument, 0, 'c'}, //numger of components of src img
{"depth", required_argument, 0, 'b'}, //bit depth of src img
{"level", required_argument, 0, 'l'}, //level of dwt
{"device", required_argument, 0, 'D'}, //cuda device
{"forward", no_argument, 0, 'f'}, //forward transform
{"reverse", no_argument, 0, 'r'}, //reverse transform
{"97", no_argument, 0, '9'}, //9/7 transform
{"53", no_argument, 0, '5' }, //5/3transform
{"write-visual",no_argument, 0, 'w' }, //write output (subbands) in visual (tiled) order instead of linear
{"help", no_argument, 0, 'h'}
};
int pixWidth = 0; //<real pixWidth
int pixHeight = 0; //<real pixHeight
int compCount = 3; //number of components; 3 for RGB or YUV, 4 for RGBA
int bitDepth = 8;
int dwtLvls = 3; //default number of DWT levels
int device = 0;
int forward = 1; //forward transform
int dwt97 = 1; //1=dwt9/7, 0=dwt5/3 transform
int writeVisual = 0; //write output (subbands) in visual (tiled) order instead of linear
char * pos;
while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts, &optindex)) != -1) {
switch (ch) {
case 'd':
pixWidth = atoi(optarg);
pos = strstr(optarg, "x");
if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
usage();
return -1;
}
pixHeight = atoi(pos+1);
break;
case 'c':
compCount = atoi(optarg);
break;
case 'b':
bitDepth = atoi(optarg);
break;
case 'l':
dwtLvls = atoi(optarg);
break;
case 'D':
device = atoi(optarg);
break;
case 'f':
forward = 1;
break;
case 'r':
forward = 0;
break;
case '9':
dwt97 = 1;
break;
case '5':
dwt97 = 0;
break;
case 'w':
writeVisual = 1;
break;
case 'h':
usage();
return 0;
case '?':
return -1;
default :
usage();
return -1;
}
}
argc -= optind;
argv += optind;
if (argc == 0) { // at least one filename is expected
printf("Please supply src file name\n");
usage();
return -1;
}
if (pixWidth <= 0 || pixHeight <=0) {
printf("Wrong or missing dimensions\n");
usage();
return -1;
}
if (forward == 0) {
writeVisual = 0; //do not write visual when RDWT
}
// device init
int devCount;
cudaGetDeviceCount(&devCount);
cudaCheckError("Get device count");
if (devCount == 0) {
printf("No CUDA enabled device\n");
return -1;
}
if (device < 0 || device > devCount -1) {
printf("Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
device, 0, devCount -1);
return -1;
}
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, device);
cudaCheckError("Get device properties");
if (devProp.major < 1) {
printf("Device %d does not support CUDA\n", device);
return -1;
}
printf("Using device %d: %s\n", device, devProp.name);
cudaSetDevice(device);
cudaCheckError("Set selected device");
struct dwt *d;
d = (struct dwt *)malloc(sizeof(struct dwt));
d->srcImg = NULL;
d->pixWidth = pixWidth;
d->pixHeight = pixHeight;
d->components = compCount;
d->dwtLvls = dwtLvls;
// file names
d->srcFilename = (char *)malloc(strlen(argv[0]) + 1); // +1 for the terminating '\0'
strcpy(d->srcFilename, argv[0]);
if (argc == 1) { // only one filename supplied
d->outFilename = (char *)malloc(strlen(d->srcFilename)+5); // room for ".dwt" plus the terminating '\0'
strcpy(d->outFilename, d->srcFilename);
strcpy(d->outFilename+strlen(d->srcFilename), ".dwt");
} else {
d->outFilename = strdup(argv[1]);
}
//Input review
printf("Source file:\t\t%s\n", d->srcFilename);
printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
printf(" Components count:\t%d\n", compCount);
printf(" Bit depth:\t\t%d\n", bitDepth);
printf(" DWT levels:\t\t%d\n", dwtLvls);
printf(" Forward transform:\t%d\n", forward);
printf(" 9/7 transform:\t\t%d\n", dwt97);
//data sizes
int inputSize = pixWidth*pixHeight*compCount; //<amount of data (in bytes) to process
//load img source image
cudaMallocHost((void **)&d->srcImg, inputSize);
cudaCheckError("Alloc host memory");
if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
return -1;
/* DWT */
if (forward == 1) {
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
else { // reverse
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
//writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
//writeComponent(g_wave_cuda, 512000, ".g");
//writeComponent(g_cuda, componentSize, ".g");
//writeComponent(b_wave_cuda, componentSize, ".b");
cudaFreeHost(d->srcImg);
cudaCheckError("Cuda free host");
return 0;
}
|
c4ca4d87a01b6f7c82cc74655dfa356f38d812bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2022 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifdef USE_CUDA_EXP
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include "cuda_binary_metric.hpp"
#include "cuda_pointwise_metric.hpp"
#include "cuda_regression_metric.hpp"
namespace LightGBM {
template <typename CUDA_METRIC, bool USE_WEIGHTS>
__global__ void EvalKernel(const data_size_t num_data, const label_t* labels, const label_t* weights,
const double* scores, double* reduce_block_buffer) {
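  // one thread per data point: the block reduces the pointwise metric values with warp shuffles
  // and writes one partial sum per block into reduce_block_buffer; when weights are used, the
  // per-block weight sums go into the second half of the buffer (offset by gridDim.x)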
__shared__ double shared_mem_buffer[32];
const data_size_t index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
double point_metric = 0.0;
if (index < num_data) {
point_metric = USE_WEIGHTS ?
CUDA_METRIC::MetricOnPointCUDA(labels[index], scores[index]) * weights[index] :
CUDA_METRIC::MetricOnPointCUDA(labels[index], scores[index]);
}
const double block_sum_point_metric = ShuffleReduceSum<double>(point_metric, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
if (threadIdx.x == 0) {
reduce_block_buffer[blockIdx.x] = block_sum_point_metric;
}
if (USE_WEIGHTS) {
double weight = 0.0;
if (index < num_data) {
weight = static_cast<double>(weights[index]);
const double block_sum_weight = ShuffleReduceSum<double>(weight, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
if (threadIdx.x == 0) {
reduce_block_buffer[blockIdx.x + gridDim.x] = block_sum_weight;
}
}
}
}
template <typename HOST_METRIC, typename CUDA_METRIC>
void CUDAPointwiseMetricInterface<HOST_METRIC, CUDA_METRIC>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const {
const int num_blocks = (this->num_data_ + NUM_DATA_PER_EVAL_THREAD - 1) / NUM_DATA_PER_EVAL_THREAD;
if (this->cuda_weights_ != nullptr) {
hipLaunchKernelGGL(( EvalKernel<CUDA_METRIC, true>), dim3(num_blocks), dim3(NUM_DATA_PER_EVAL_THREAD), 0, 0,
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
} else {
hipLaunchKernelGGL(( EvalKernel<CUDA_METRIC, false>), dim3(num_blocks), dim3(NUM_DATA_PER_EVAL_THREAD), 0, 0,
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
}
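  // second stage: reduce the per-block partial sums on the device, then copy the scalar results back to the host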
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData(), num_blocks, reduce_block_buffer_inner_.RawData());
CopyFromCUDADeviceToHost<double>(sum_loss, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
*sum_weight = static_cast<double>(this->num_data_);
if (this->cuda_weights_ != nullptr) {
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData() + num_blocks, num_blocks, reduce_block_buffer_inner_.RawData());
CopyFromCUDADeviceToHost<double>(sum_weight, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
}
}
template void CUDAPointwiseMetricInterface<RMSEMetric, CUDARMSEMetric>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const;
template void CUDAPointwiseMetricInterface<L2Metric, CUDAL2Metric>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const;
template void CUDAPointwiseMetricInterface<BinaryLoglossMetric, CUDABinaryLoglossMetric>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const;
} // namespace LightGBM
#endif // USE_CUDA_EXP
|
c4ca4d87a01b6f7c82cc74655dfa356f38d812bd.cu
|
/*!
* Copyright (c) 2022 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifdef USE_CUDA_EXP
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include "cuda_binary_metric.hpp"
#include "cuda_pointwise_metric.hpp"
#include "cuda_regression_metric.hpp"
namespace LightGBM {
template <typename CUDA_METRIC, bool USE_WEIGHTS>
__global__ void EvalKernel(const data_size_t num_data, const label_t* labels, const label_t* weights,
const double* scores, double* reduce_block_buffer) {
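  // one thread per data point: the block reduces the pointwise metric values with warp shuffles
  // and writes one partial sum per block into reduce_block_buffer; when weights are used, the
  // per-block weight sums go into the second half of the buffer (offset by gridDim.x)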
__shared__ double shared_mem_buffer[32];
const data_size_t index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
double point_metric = 0.0;
if (index < num_data) {
point_metric = USE_WEIGHTS ?
CUDA_METRIC::MetricOnPointCUDA(labels[index], scores[index]) * weights[index] :
CUDA_METRIC::MetricOnPointCUDA(labels[index], scores[index]);
}
const double block_sum_point_metric = ShuffleReduceSum<double>(point_metric, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
if (threadIdx.x == 0) {
reduce_block_buffer[blockIdx.x] = block_sum_point_metric;
}
if (USE_WEIGHTS) {
double weight = 0.0;
if (index < num_data) {
weight = static_cast<double>(weights[index]);
const double block_sum_weight = ShuffleReduceSum<double>(weight, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
if (threadIdx.x == 0) {
reduce_block_buffer[blockIdx.x + gridDim.x] = block_sum_weight;
}
}
}
}
template <typename HOST_METRIC, typename CUDA_METRIC>
void CUDAPointwiseMetricInterface<HOST_METRIC, CUDA_METRIC>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const {
const int num_blocks = (this->num_data_ + NUM_DATA_PER_EVAL_THREAD - 1) / NUM_DATA_PER_EVAL_THREAD;
if (this->cuda_weights_ != nullptr) {
EvalKernel<CUDA_METRIC, true><<<num_blocks, NUM_DATA_PER_EVAL_THREAD>>>(
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
} else {
EvalKernel<CUDA_METRIC, false><<<num_blocks, NUM_DATA_PER_EVAL_THREAD>>>(
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
}
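  // second stage: reduce the per-block partial sums on the device, then copy the scalar results back to the host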
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData(), num_blocks, reduce_block_buffer_inner_.RawData());
CopyFromCUDADeviceToHost<double>(sum_loss, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
*sum_weight = static_cast<double>(this->num_data_);
if (this->cuda_weights_ != nullptr) {
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData() + num_blocks, num_blocks, reduce_block_buffer_inner_.RawData());
CopyFromCUDADeviceToHost<double>(sum_weight, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
}
}
template void CUDAPointwiseMetricInterface<RMSEMetric, CUDARMSEMetric>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const;
template void CUDAPointwiseMetricInterface<L2Metric, CUDAL2Metric>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const;
template void CUDAPointwiseMetricInterface<BinaryLoglossMetric, CUDABinaryLoglossMetric>::LaunchEvalKernel(const double* score, double* sum_loss, double* sum_weight) const;
} // namespace LightGBM
#endif // USE_CUDA_EXP
|
bf4e1ec9a5530b6a1a482240d1781a170233ad4d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//MANDELBULB WITH CUDA
//Alexander Kuczala 2015
//[email protected]
//Input: base name of output files
//Outputs binary .rgb file of pixels encoded as 24 bit colors #RRGGBB
//can be converted to image file with program such as ImageMagick (`convert' in linux)
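//e.g. for the default 1200x1200 frames: convert -size 1200x1200 -depth 8 rgb:basename_000.rgb basename_000.png (swap rgb: for bgr: if channels appear reversed)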
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cmath>
#include <string>
#include <sys/time.h>
#include "Vec.cu"
#include "colorFunctions.cu"
//number of runs
const int runs = 1;
//number of frames/run
int frames = 10;
//fractal parameters
const bool isJulia = false; //if true draws a julia set instead of mandelbulb
const float power = 8; //power of recursion function
//rendering parameters
const float specularity = 0.5; //intensity of specular highlights
const float specularExponent = 3; //larger values -> smaller highlights
const float fogDistance = 4; //distance at which fog completely occludes objects
const float lightIntensity = 1;
const float cameraAngle = 1.0; //divergence of camera rays
//fractal calculation parameters
//default values: bailout = 6, maxIterations = 8
const float bailout = 12; //value of r to terminate at, lower -> smoother, less detailed
const int maxIterations = 32; //more iterations, more accurate fractal
//ray stepping parameters
const float epsilonScale = 0.1; //default value = 1
const float minEpsilon = 1E-7; //default = 1E-7
const int stepLimit = 5000; //number of allowed marching steps (default 100)
const float rayLengthLimit = 4; //maximum ray length allowed
//(should be smaller than or equal to the fog distance)
int* h_pixels; //host image array
//clip values to certain range
__device__ float clamp(float value, float lower, float upper)
{
if(value < lower)
return lower;
if(value > upper)
return upper;
return value;
}
//Ray class performs operations of single Ray/processor
//All ray functions are performed on GPU
class Ray{
public:
Vec dir; //Ray direction
//Camera parameters
Vec cameraPos; //camera position
Vec cameraTarget; //camera target
Vec cameraDir; //calculated direction of camera
Vec cameraUp; //direction of camera's y axis
Vec cameraRight; //direction of camera's x axis
//Light position
Vec lightPos;
//const bool shadowsOn = false;
//constant vector c for julia set
Vec julia;
//coloring variables
int backgroundColor;
float breakRadius; //keep track of breakout radius value for coloring
  float minDist; //keep track of minimum distance of orbits in recursion
float eps; //intersection distance threshold
float pixelScale; //ray stepping size
int stepsTaken; //number of ray steps taken in last iteration
int width, height; //image dimensions
//Constructor
__device__ Ray(int i, int j, Vec cameraPos, Vec cameraTarget, int width, int height)
{
//set width and height
this->width = width;
this->height = height;
pixelScale = 1.0/width; //scale of distance between rays
//set camera parameters
Vec cameraUp(0,0,1); //set direction of camera y axis
this->cameraPos = cameraPos.copy();
this->cameraTarget = cameraTarget.copy();
this->cameraUp = cameraUp.copy();
//set light position
Vec lightPos(-2,-2,2);
this->lightPos = lightPos;
initCamera(); //set up orthogonal basis for camera
dir = rayDir(i,j);
//set julia constant
Vec julia(0.8,-0.9,-0.4);
//set background color
backgroundColor = color(100,100,100);
}
//calculate ray direction from pixel address
__device__ Vec rayDir(int i, int j)
{
//scale the camera frame vectors to create the cone of rays
float xscale = 1.0*(i-width/2.0)/width*cameraAngle;
float yscale = -1.0*(j-height/2.0)/height*cameraAngle;
Vec out = cameraDir.add(cameraRight.times(xscale)).add(cameraUp.times(yscale));
return out.unit();
}
  //Single ray marching step with initial vector zed0
__device__ float traceStep(Vec zed0)
{
Vec c(0,0,0); //initialize c vector
//c is either a constant (for julia) or the starting point (mandelbulb)
if(isJulia)
c = julia;
else
c = zed0.copy();
Vec zed = zed0.copy();
//convert initial zed to spherical coordinates
float r = zed.mag();
float th = atan2(zed.y,zed.x);
float ph = asin(zed.z/r);
float dr = 1; //initial value of r derivative
minDist = -1; //initialize minimum distance
float powR, powRsin;
int n=0;
//zed iterations
for(n=0; n<= maxIterations; n++)
{
//compute scalar derivative approximation
powR = pow(r,power - 1);
dr = dr*powR*power + 1;
//iterate zed (zed = zed^p + c)
powR = pow(r,power);
powRsin = sin(power*ph);
zed.x = powR*powRsin*cos(power*th);
zed.y = powR*powRsin*sin(power*th);
zed.z = powR*cos(power*ph);
zed.addTo(c);
r = zed.mag(); //new radius
if(minDist < 0 ^ r < minDist) minDist = r; //update min distance
if(r > bailout) break; //stop iterating if r exceeds bailout value
//calculate new angles
th = atan2(zed.y, zed.x);
ph = acos(zed.z / r);
}
//memoize for coloring
breakRadius = r;
//return distance estimation value
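    //(0.5*r*log(r)/dr is the standard Mandelbulb distance estimator: an approximate lower bound
    //on the distance to the surface, so the ray can march this far without overshooting)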
return 0.5*r*log(r)/dr;
}
//approximate normal vector to fractal surface
__device__ Vec getNormal(Vec zed)
{
eps = eps/2.0;
//calculate small finite differences around zed
Vec zedx1 = zed.add(Vec(eps,0,0));
Vec zedx2 = zed.sub(Vec(eps,0,0));
Vec zedy1 = zed.add(Vec(0,eps,0));
Vec zedy2 = zed.sub(Vec(0,eps,0));
Vec zedz1 = zed.add(Vec(0,0,eps));
Vec zedz2 = zed.sub(Vec(0,0,eps));
//calculate normal to surface
float dx = traceStep(zedx1) - traceStep(zedx2);
float dy = traceStep(zedy1) - traceStep(zedy2);
float dz = traceStep(zedz1) - traceStep(zedz2);
Vec normal = Vec(dx,dy,dz);
normal = normal.unit();
return normal;
}
//ray stepping algorithm
__device__ float trace(Vec p0, Vec dir)
{
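    //sphere tracing: repeatedly advance the ray by a fraction (epsilonScale) of the distance
    //estimate until it is within eps of the surface or the ray length limit is exceeded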
Vec zed0 = p0.copy(); //initial point
float rayLength = 0;
eps = minEpsilon; //initial intersection threshold
int maxSteps = int(1.0*stepLimit/epsilonScale);
float distance = 0;
int i;
for(i = 0;i<maxSteps;i++)
{
      distance = traceStep(zed0); //estimated distance to the fractal surface (safe step size)
//step ray forward
zed0 = zed0.add(dir.times(epsilonScale*distance));
rayLength += epsilonScale*distance;
//if ray length exceeds limit, assume no intersection and stop
if(rayLength > rayLengthLimit)
return -1;
//stop if within intersection threshold
if(distance < eps) break;
//update intersection threshold
eps = max(minEpsilon,pixelScale*rayLength);
//println("eps= " + eps);
}
stepsTaken = i; //record steps taken
//assume intersection if number of steps is exhausted
//this can cause artifacts if the stepLimit is too small
return rayLength;
}
//various routines for coloring
__device__ int stepColoring()
{
int scale = 20;
float r = 1.0*(stepsTaken%scale)/scale;
float g = 1.0*((stepsTaken+scale/3)%scale)/scale;
float b = 1.0*((stepsTaken+2*scale/3)%scale)/scale;
r = abs(r-0.5)*2;
g = abs(g-0.5)*2;
b = abs(b-0.5)*2;
return color(int(r*255),int(g*255),int(b*255));
}
__device__ int minOrbitColoring()
{
float scale = 0.4;
float r,g,b;
float spam;
r = modf((minDist)/scale,&spam);
g = modf((minDist+scale/3)/scale,&spam);
b = modf((minDist+2*scale/3)/scale,&spam);
r = abs(r-0.5)*2;
g = abs(g-0.5)*2;
b = abs(b-0.5)*2;
return color(int(r*255),int(g*255),int(b*255));
}
//returns fractal color
__device__ int getCol()
{
return minOrbitColoring();
}
//simulate ambient light by shading
//based on number of steps taken and minimum orbit distance
__device__ float ambientOcclusion()
{
//const float aoStrength = 1;
const float emphasis = 0.58; //default
int maxSteps = int(stepLimit/ epsilonScale);
float ao = 1.0 - minDist*minDist;
if(ao < 0)
ao = 0;
if(ao > 1)
ao = 1;
ao = 1.0 - ao;
ao = ao*(1-1.0*stepsTaken/((float)(maxSteps))*2*emphasis);
return clamp(ao,0.0,1.0);
}
//apply fog based on distance to point
__device__ float fog(float distance)
{
return clamp(distance/fogDistance,0.0,1.0);
}
__device__ int rayTraceFractal()
{
//Vec dir = rayDir(i,j);
Vec pos = cameraPos;
float distance = trace(pos,dir); //find distance with ray marching
if(distance < 0) //negative distance means no intersection
return backgroundColor;
//intersection point of ray with surface
Vec intersect = pos.add(dir.times(distance));
//normal to surface
Vec normal = getNormal(intersect);
//shading for surface
//calculate unit vector pointing from light to object
Vec lightDir = intersect.sub(lightPos);
lightDir = lightDir.unit();
//calculate cos of angle between light ray and normal to sphere and use for shade
float normDotLight = -normal.dot(lightDir);
float shade = 0;
if(normDotLight < 0) //if dot product is - then no shading
shade = 0;
else
shade = normDotLight*lightIntensity;
shade = abs(shade);
//phong specularity-----
//reflected light vector
Vec reflect = lightDir.times(-1).sub(normal.times(2*normDotLight));
float reflDotRay = -reflect.dot(dir);
float specular = 0;
if(reflDotRay < 0)
specular = specularity*pow(abs(reflDotRay),specularExponent);
//base color is lambertian shading
int out = colorShade(getCol(),shade);
//apply specularity
out = addColors(out,colorShade(color(255,255,255),specular));
    //apply ambient occlusion
out = colorShade(out,ambientOcclusion());
//check for shadows.
//if(shadowsOn)
//{
//create shadow detecting ray pointing from object to light
//place ray's origin slightly above intersection point
//push above surface by normal*eps
//Vec shadowPos = intersect.copy().add(normal.times(eps));
//Vec shadowDir = lightDir.times(-1);
//float dist = trace(pos,dir); //compute distance to fractal along ray to light
//if ray intersects a surface between object and light, cast shadow
//if(dist > 0 && dist*dist < intersect.sub(lightPos).squared())
//{
// return 0;
//}
//}
//add fog
out = averageColors(backgroundColor,out,fog(distance));
return out;
}
//calculate frame vectors for camera
__device__ void initCamera()
{
//points from camera to target
cameraDir = cameraTarget.sub(cameraPos).unit();
    //use Gram-Schmidt to make up vector orthogonal to dir
cameraUp = cameraUp.sub(cameraDir.times(cameraUp.dot(cameraDir)));
cameraUp = cameraUp.unit();
//calculate right pointing camera frame vector
cameraRight = cameraDir.cross(cameraUp).unit();
}
};
//end ray object----------------------------
//Kernel. One ray per thread.
__global__ void draw(int* pixels,int* width, int* height, Vec* cameraPos, Vec* cameraTarget)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
int n = (*width) * (*height);
if(index < n)
{
int i = index%(*width);
int j = index/(*width);
Ray ray(i,j,*cameraPos,*cameraTarget,*width,*height);
pixels[index] = ray.rayTraceFractal();
}
//
}
//write pixel color values as binary #RRGGBB to output file
void write(string outFileName,int width, int height)
{
ofstream outFile;
//open file for writing in binary
outFile.open(outFileName.c_str(),ios::out | ios::binary);
if(!outFile.is_open())
{
cout << "couldn't write to " << outFileName << endl;
return;
}
cout << "writing to " << outFileName << endl;
for(int i=0;i<width*height;i++)
{
int p = h_pixels[i];
//put the bits in the right order (Read from left to right)
unsigned int unp = (unsigned int)(color(getb(p),getg(p),getr(p)));
//outFile << h_pixels[i];
outFile.write((char*) &unp,3); //colors are 3 bytes long
}
outFile.close();
}
int main(int argc, char* argv[])
{
//timer parameters
struct timeval t1, t2;
struct timezone tz;
//time data arrays
double time[runs];
float kernelTime[runs];
hipError_t err;
//run loop. can vary image size etc
for(int run = 0; run< runs; run++)
{
//start timer------
gettimeofday(&t1, &tz);
//int h_width = (run+1)*100; //variable width
int h_width = 1200; //constant width
cout << "width = " << h_width << endl;
//image size on host and device
int h_height = h_width;
int* d_width;int* d_height;
int n = h_width*h_height; //number of pixels
size_t size = sizeof(int)*n;
size_t vecSize = sizeof(Vec);
//allocate pixel array on host
h_pixels = (int*)malloc(size);
int* d_pixels; //pixel array on device
//Camera position and target
Vec h_cameraPos;
Vec h_cameraTarget;
Vec* d_cameraPos;
Vec* d_cameraTarget;
//allocate memory on device
//allocate image size on device
err = hipMalloc((void **) &d_width, sizeof(int));
if(err != hipSuccess) cout << "can't allocate memory for width on device" << endl;
err = hipMalloc((void **) &d_height, sizeof(int));
if(err != hipSuccess) cout << "can't allocate memory for height on device" << endl;
//allocate pixel array on device
err = hipMalloc((void **) &d_pixels, size);
if(err != hipSuccess) cout << "can't allocate memory for pixel array on device" << endl;
//allocate camera position and target
err = hipMalloc((void **) &d_cameraPos, vecSize);
if(err != hipSuccess) cout << "can't allocate memory for cameraPos on device" << endl;
err = hipMalloc((void **) &d_cameraTarget, vecSize);
if(err != hipSuccess) cout << "can't allocate memory for cameraTarget on device" << endl;
//run animation
//set initial and final values of camera target and position
Vec cameraTargetInit(0,0,0);
Vec cameraTargetFinal(0.6025440273509881, -0.7549067847481121, 0.5049324975811623);
Vec cameraPosInit(1,-2,1.5);
Vec cameraPosFinal = cameraTargetFinal.copy();
float dt = 1.0/frames;
float t = 0;
for(int frame = 0;frame < frames; frame++)
{
cout << "Frame " << frame << "/" << frames << endl;
//move towards fractal at exponentially decaying rate
float distFrac = exp(-8*t);
h_cameraPos = cameraPosInit.times(distFrac).add(cameraPosFinal.times(1-distFrac));
h_cameraTarget = cameraTargetInit.times(distFrac).add(cameraTargetFinal.times(1-distFrac));
//copy image size to device
err = hipMemcpy(d_width, &h_width, sizeof(int), hipMemcpyHostToDevice);
if(err != hipSuccess) cout << "can't copy width to device" << endl;
err =hipMemcpy(d_height, &h_height, sizeof(int), hipMemcpyHostToDevice);
if(err != hipSuccess) cout << "can't copy height to device" << endl;
//copy camera data to device
err = hipMemcpy(d_cameraPos, &h_cameraPos, vecSize, hipMemcpyHostToDevice);
if(err != hipSuccess) cout << "can't copy cameraPos to device" << endl;
err =hipMemcpy(d_cameraTarget, &h_cameraTarget, vecSize, hipMemcpyHostToDevice);
if(err != hipSuccess) cout << "can't copy cameraTarget to device" << endl;
//start CUDA timer
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipEventRecord(start,0); //start kernel timer
//----launch kernel-----
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
cout << "launching " << blocksPerGrid << " blocks of ";
cout << threadsPerBlock << " threads" << endl;
hipLaunchKernelGGL(( draw), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pixels,d_width,d_height, d_cameraPos, d_cameraTarget);
//stop CUDA timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&kernelTime[run],start,stop);
hipEventDestroy(start);hipEventDestroy(stop);
cout << "kernel time: " << kernelTime[run] << endl;
//check for kernel error
err = hipGetLastError();
if(err != hipSuccess) cout << "kernel failed: " << hipGetErrorString(err) << endl;
//copy results to hosts
err = hipMemcpy(h_pixels, d_pixels, size,hipMemcpyDeviceToHost);
if(err != hipSuccess) cout << "can't copy to host" << endl;
//if program has output filename, output to file
if(argc == 2)
{
stringstream ss;
ss << argv[1] << "_" << setfill('0') << setw(3) << frame << ".rgb" ;
//ss << argv[1] << "_" << setfill('0') << setw(4) << h_width << ".rgb" ;
string fileName;
ss >> fileName;
write(fileName,h_width,h_height);
}
//increment t
t += dt;
}
//Deallocate memory
hipFree(d_pixels);
hipFree(d_cameraTarget); hipFree(d_cameraPos);
//stop timer---
gettimeofday(&t2, &tz);
time[run] = (t2.tv_sec-t1.tv_sec) + 1e-6*(t2.tv_usec-t1.tv_usec);
cout << "Run time: " << time[run] << endl;
}
//reset GPU
err = hipDeviceReset();
if(err != hipSuccess) cout << "Couldn't reset GPU" << endl;
//print runtimes
cout << "Run times" << endl;
for(int i=0;i<runs;i++)
cout << time[i] << endl;
cout << "Kernel times" << endl;
for(int i=0;i<runs;i++)
cout << kernelTime[i] << endl;
return 0;
}
|
bf4e1ec9a5530b6a1a482240d1781a170233ad4d.cu
|
//MANDELBULB WITH CUDA
//Alexander Kuczala 2015
//[email protected]
//Input: base name of output files
//Outputs binary .rgb file of pixels encoded as 24 bit colors #RRGGBB
//can be converted to image file with program such as ImageMagick (`convert' in linux)
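//e.g. for the default 1200x1200 frames: convert -size 1200x1200 -depth 8 rgb:basename_000.rgb basename_000.png (swap rgb: for bgr: if channels appear reversed)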
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cmath>
#include <string>
#include <sys/time.h>
#include "Vec.cu"
#include "colorFunctions.cu"
//number of runs
const int runs = 1;
//number of frames/run
int frames = 10;
//fractal parameters
const bool isJulia = false; //if true draws a julia set instead of mandelbulb
const float power = 8; //power of recursion function
//rendering parameters
const float specularity = 0.5; //intensity of specular highlights
const float specularExponent = 3; //larger values -> smaller highlights
const float fogDistance = 4; //distance at which fog completely occludes objects
const float lightIntensity = 1;
const float cameraAngle = 1.0; //divergence of camera rays
//fractal calculation parameters
//default values: bailout = 6, maxIterations = 8
const float bailout = 12; //value of r to terminate at, lower -> smoother, less detailed
const int maxIterations = 32; //more iterations, more accurate fractal
//ray stepping parameters
const float epsilonScale = 0.1; //default value = 1
const float minEpsilon = 1E-7; //default = 1E-7
const int stepLimit = 5000; //number of allowed marching steps (default 100)
const float rayLengthLimit = 4; //maximum ray length allowed
//(should be smaller than or equal to the fog distance)
int* h_pixels; //host image array
//clip values to certain range
__device__ float clamp(float value, float lower, float upper)
{
if(value < lower)
return lower;
if(value > upper)
return upper;
return value;
}
//Ray class performs operations of single Ray/processor
//All ray functions are performed on GPU
class Ray{
public:
Vec dir; //Ray direction
//Camera parameters
Vec cameraPos; //camera position
Vec cameraTarget; //camera target
Vec cameraDir; //calculated direction of camera
Vec cameraUp; //direction of camera's y axis
Vec cameraRight; //direction of camera's x axis
//Light position
Vec lightPos;
//const bool shadowsOn = false;
//constant vector c for julia set
Vec julia;
//coloring variables
int backgroundColor;
float breakRadius; //keep track of breakout radius value for coloring
  float minDist; //keep track of minimum distance of orbits in recursion
float eps; //intersection distance threshold
float pixelScale; //ray stepping size
int stepsTaken; //number of ray steps taken in last iteration
int width, height; //image dimensions
//Constructor
__device__ Ray(int i, int j, Vec cameraPos, Vec cameraTarget, int width, int height)
{
//set width and height
this->width = width;
this->height = height;
pixelScale = 1.0/width; //scale of distance between rays
//set camera parameters
Vec cameraUp(0,0,1); //set direction of camera y axis
this->cameraPos = cameraPos.copy();
this->cameraTarget = cameraTarget.copy();
this->cameraUp = cameraUp.copy();
//set light position
Vec lightPos(-2,-2,2);
this->lightPos = lightPos;
initCamera(); //set up orthogonal basis for camera
dir = rayDir(i,j);
//set julia constant
Vec julia(0.8,-0.9,-0.4);
//set background color
backgroundColor = color(100,100,100);
}
//calculate ray direction from pixel address
__device__ Vec rayDir(int i, int j)
{
//scale the camera frame vectors to create the cone of rays
float xscale = 1.0*(i-width/2.0)/width*cameraAngle;
float yscale = -1.0*(j-height/2.0)/height*cameraAngle;
Vec out = cameraDir.add(cameraRight.times(xscale)).add(cameraUp.times(yscale));
return out.unit();
}
  //Single ray marching step with initial vector zed0
__device__ float traceStep(Vec zed0)
{
Vec c(0,0,0); //initialize c vector
//c is either a constant (for julia) or the starting point (mandelbulb)
if(isJulia)
c = julia;
else
c = zed0.copy();
Vec zed = zed0.copy();
//convert initial zed to spherical coordinates
float r = zed.mag();
float th = atan2(zed.y,zed.x);
float ph = asin(zed.z/r);
float dr = 1; //initial value of r derivative
minDist = -1; //initialize minimum distance
float powR, powRsin;
int n=0;
//zed iterations
for(n=0; n<= maxIterations; n++)
{
//compute scalar derivative approximation
powR = pow(r,power - 1);
dr = dr*powR*power + 1;
//iterate zed (zed = zed^p + c)
powR = pow(r,power);
powRsin = sin(power*ph);
zed.x = powR*powRsin*cos(power*th);
zed.y = powR*powRsin*sin(power*th);
zed.z = powR*cos(power*ph);
zed.addTo(c);
r = zed.mag(); //new radius
if(minDist < 0 ^ r < minDist) minDist = r; //update min distance
if(r > bailout) break; //stop iterating if r exceeds bailout value
//calculate new angles
th = atan2(zed.y, zed.x);
ph = acos(zed.z / r);
}
//memoize for coloring
breakRadius = r;
//return distance estimation value
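    //(0.5*r*log(r)/dr is the standard Mandelbulb distance estimator: an approximate lower bound
    //on the distance to the surface, so the ray can march this far without overshooting)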
return 0.5*r*log(r)/dr;
}
//approximate normal vector to fractal surface
__device__ Vec getNormal(Vec zed)
{
eps = eps/2.0;
//calculate small finite differences around zed
Vec zedx1 = zed.add(Vec(eps,0,0));
Vec zedx2 = zed.sub(Vec(eps,0,0));
Vec zedy1 = zed.add(Vec(0,eps,0));
Vec zedy2 = zed.sub(Vec(0,eps,0));
Vec zedz1 = zed.add(Vec(0,0,eps));
Vec zedz2 = zed.sub(Vec(0,0,eps));
//calculate normal to surface
float dx = traceStep(zedx1) - traceStep(zedx2);
float dy = traceStep(zedy1) - traceStep(zedy2);
float dz = traceStep(zedz1) - traceStep(zedz2);
Vec normal = Vec(dx,dy,dz);
normal = normal.unit();
return normal;
}
//ray stepping algorithm
__device__ float trace(Vec p0, Vec dir)
{
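    //sphere tracing: repeatedly advance the ray by a fraction (epsilonScale) of the distance
    //estimate until it is within eps of the surface or the ray length limit is exceeded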
Vec zed0 = p0.copy(); //initial point
float rayLength = 0;
eps = minEpsilon; //initial intersection threshold
int maxSteps = int(1.0*stepLimit/epsilonScale);
float distance = 0;
int i;
for(i = 0;i<maxSteps;i++)
{
      distance = traceStep(zed0); //estimated distance to the fractal surface (safe step size)
//step ray forward
zed0 = zed0.add(dir.times(epsilonScale*distance));
rayLength += epsilonScale*distance;
//if ray length exceeds limit, assume no intersection and stop
if(rayLength > rayLengthLimit)
return -1;
//stop if within intersection threshold
if(distance < eps) break;
//update intersection threshold
eps = max(minEpsilon,pixelScale*rayLength);
//println("eps= " + eps);
}
stepsTaken = i; //record steps taken
//assume intersection if number of steps is exhausted
//this can cause artifacts if the stepLimit is too small
return rayLength;
}
//various routines for coloring
__device__ int stepColoring()
{
int scale = 20;
float r = 1.0*(stepsTaken%scale)/scale;
float g = 1.0*((stepsTaken+scale/3)%scale)/scale;
float b = 1.0*((stepsTaken+2*scale/3)%scale)/scale;
r = abs(r-0.5)*2;
g = abs(g-0.5)*2;
b = abs(b-0.5)*2;
return color(int(r*255),int(g*255),int(b*255));
}
__device__ int minOrbitColoring()
{
float scale = 0.4;
float r,g,b;
float spam;
r = modf((minDist)/scale,&spam);
g = modf((minDist+scale/3)/scale,&spam);
b = modf((minDist+2*scale/3)/scale,&spam);
r = abs(r-0.5)*2;
g = abs(g-0.5)*2;
b = abs(b-0.5)*2;
return color(int(r*255),int(g*255),int(b*255));
}
//returns fractal color
__device__ int getCol()
{
return minOrbitColoring();
}
//simulate ambient light by shading
//based on number of steps taken and minimum orbit distance
__device__ float ambientOcclusion()
{
//const float aoStrength = 1;
const float emphasis = 0.58; //default
int maxSteps = int(stepLimit/ epsilonScale);
float ao = 1.0 - minDist*minDist;
if(ao < 0)
ao = 0;
if(ao > 1)
ao = 1;
ao = 1.0 - ao;
ao = ao*(1-1.0*stepsTaken/((float)(maxSteps))*2*emphasis);
return clamp(ao,0.0,1.0);
}
//apply fog based on distance to point
__device__ float fog(float distance)
{
return clamp(distance/fogDistance,0.0,1.0);
}
__device__ int rayTraceFractal()
{
//Vec dir = rayDir(i,j);
Vec pos = cameraPos;
float distance = trace(pos,dir); //find distance with ray marching
if(distance < 0) //negative distance means no intersection
return backgroundColor;
//intersection point of ray with surface
Vec intersect = pos.add(dir.times(distance));
//normal to surface
Vec normal = getNormal(intersect);
//shading for surface
//calculate unit vector pointing from light to object
Vec lightDir = intersect.sub(lightPos);
lightDir = lightDir.unit();
//calculate cos of angle between light ray and normal to sphere and use for shade
float normDotLight = -normal.dot(lightDir);
float shade = 0;
if(normDotLight < 0) //if dot product is - then no shading
shade = 0;
else
shade = normDotLight*lightIntensity;
shade = abs(shade);
//phong specularity-----
//reflected light vector
Vec reflect = lightDir.times(-1).sub(normal.times(2*normDotLight));
float reflDotRay = -reflect.dot(dir);
float specular = 0;
if(reflDotRay < 0)
specular = specularity*pow(abs(reflDotRay),specularExponent);
//base color is lambertian shading
int out = colorShade(getCol(),shade);
//apply specularity
out = addColors(out,colorShade(color(255,255,255),specular));
    //apply ambient occlusion
out = colorShade(out,ambientOcclusion());
//check for shadows.
//if(shadowsOn)
//{
//create shadow detecting ray pointing from object to light
//place ray's origin slightly above intersection point
//push above surface by normal*eps
//Vec shadowPos = intersect.copy().add(normal.times(eps));
//Vec shadowDir = lightDir.times(-1);
//float dist = trace(pos,dir); //compute distance to fractal along ray to light
//if ray intersects a surface between object and light, cast shadow
//if(dist > 0 && dist*dist < intersect.sub(lightPos).squared())
//{
// return 0;
//}
//}
//add fog
out = averageColors(backgroundColor,out,fog(distance));
return out;
}
//calculate frame vectors for camera
__device__ void initCamera()
{
//points from camera to target
cameraDir = cameraTarget.sub(cameraPos).unit();
    //use Gram-Schmidt to make up vector orthogonal to dir
cameraUp = cameraUp.sub(cameraDir.times(cameraUp.dot(cameraDir)));
cameraUp = cameraUp.unit();
//calculate right pointing camera frame vector
cameraRight = cameraDir.cross(cameraUp).unit();
}
};
//end ray object----------------------------
//Kernel. One ray per thread.
__global__ void draw(int* pixels,int* width, int* height, Vec* cameraPos, Vec* cameraTarget)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
int n = (*width) * (*height);
if(index < n)
{
int i = index%(*width);
int j = index/(*width);
Ray ray(i,j,*cameraPos,*cameraTarget,*width,*height);
pixels[index] = ray.rayTraceFractal();
}
//
}
//write pixel color values as binary #RRGGBB to output file
void write(string outFileName,int width, int height)
{
ofstream outFile;
//open file for writing in binary
outFile.open(outFileName.c_str(),ios::out | ios::binary);
if(!outFile.is_open())
{
cout << "couldn't write to " << outFileName << endl;
return;
}
cout << "writing to " << outFileName << endl;
for(int i=0;i<width*height;i++)
{
int p = h_pixels[i];
//put the bits in the right order (Read from left to right)
unsigned int unp = (unsigned int)(color(getb(p),getg(p),getr(p)));
//outFile << h_pixels[i];
outFile.write((char*) &unp,3); //colors are 3 bytes long
}
outFile.close();
}
int main(int argc, char* argv[])
{
//timer parameters
struct timeval t1, t2;
struct timezone tz;
//time data arrays
double time[runs];
float kernelTime[runs];
cudaError_t err;
//run loop. can vary image size etc
for(int run = 0; run< runs; run++)
{
//start timer------
gettimeofday(&t1, &tz);
//int h_width = (run+1)*100; //variable width
int h_width = 1200; //constant width
cout << "width = " << h_width << endl;
//image size on host and device
int h_height = h_width;
int* d_width;int* d_height;
int n = h_width*h_height; //number of pixels
size_t size = sizeof(int)*n;
size_t vecSize = sizeof(Vec);
//allocate pixel array on host
h_pixels = (int*)malloc(size);
int* d_pixels; //pixel array on device
//Camera position and target
Vec h_cameraPos;
Vec h_cameraTarget;
Vec* d_cameraPos;
Vec* d_cameraTarget;
//allocate memory on device
//allocate image size on device
err = cudaMalloc((void **) &d_width, sizeof(int));
if(err != cudaSuccess) cout << "can't allocate memory for width on device" << endl;
err = cudaMalloc((void **) &d_height, sizeof(int));
if(err != cudaSuccess) cout << "can't allocate memory for height on device" << endl;
//allocate pixel array on device
err = cudaMalloc((void **) &d_pixels, size);
if(err != cudaSuccess) cout << "can't allocate memory for pixel array on device" << endl;
//allocate camera position and target
err = cudaMalloc((void **) &d_cameraPos, vecSize);
if(err != cudaSuccess) cout << "can't allocate memory for cameraPos on device" << endl;
err = cudaMalloc((void **) &d_cameraTarget, vecSize);
if(err != cudaSuccess) cout << "can't allocate memory for cameraTarget on device" << endl;
//run animation
//set initial and final values of camera target and position
Vec cameraTargetInit(0,0,0);
Vec cameraTargetFinal(0.6025440273509881, -0.7549067847481121, 0.5049324975811623);
Vec cameraPosInit(1,-2,1.5);
Vec cameraPosFinal = cameraTargetFinal.copy();
float dt = 1.0/frames;
float t = 0;
for(int frame = 0;frame < frames; frame++)
{
cout << "Frame " << frame << "/" << frames << endl;
//move towards fractal at exponentially decaying rate
float distFrac = exp(-8*t);
h_cameraPos = cameraPosInit.times(distFrac).add(cameraPosFinal.times(1-distFrac));
h_cameraTarget = cameraTargetInit.times(distFrac).add(cameraTargetFinal.times(1-distFrac));
//copy image size to device
err = cudaMemcpy(d_width, &h_width, sizeof(int), cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy width to device" << endl;
err =cudaMemcpy(d_height, &h_height, sizeof(int), cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy height to device" << endl;
//copy camera data to device
err = cudaMemcpy(d_cameraPos, &h_cameraPos, vecSize, cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy cameraPos to device" << endl;
err =cudaMemcpy(d_cameraTarget, &h_cameraTarget, vecSize, cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy cameraTarget to device" << endl;
//start CUDA timer
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start,0); //start kernel timer
//----launch kernel-----
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
cout << "launching " << blocksPerGrid << " blocks of ";
cout << threadsPerBlock << " threads" << endl;
draw<<<blocksPerGrid, threadsPerBlock>>>(d_pixels,d_width,d_height, d_cameraPos, d_cameraTarget);
//stop CUDA timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&kernelTime[run],start,stop);
cudaEventDestroy(start);cudaEventDestroy(stop);
cout << "kernel time: " << kernelTime[run] << endl;
//check for kernel error
err = cudaGetLastError();
if(err != cudaSuccess) cout << "kernel failed: " << cudaGetErrorString(err) << endl;
//copy results to hosts
err = cudaMemcpy(h_pixels, d_pixels, size,cudaMemcpyDeviceToHost);
if(err != cudaSuccess) cout << "can't copy to host" << endl;
//if program has output filename, output to file
if(argc == 2)
{
stringstream ss;
ss << argv[1] << "_" << setfill('0') << setw(3) << frame << ".rgb" ;
//ss << argv[1] << "_" << setfill('0') << setw(4) << h_width << ".rgb" ;
string fileName;
ss >> fileName;
write(fileName,h_width,h_height);
}
//increment t
t += dt;
}
//Deallocate memory
cudaFree(d_pixels);
cudaFree(d_cameraTarget); cudaFree(d_cameraPos);
//stop timer---
gettimeofday(&t2, &tz);
time[run] = (t2.tv_sec-t1.tv_sec) + 1e-6*(t2.tv_usec-t1.tv_usec);
cout << "Run time: " << time[run] << endl;
}
//reset GPU
err = cudaDeviceReset();
if(err != cudaSuccess) cout << "Couldn't reset GPU" << endl;
//print runtimes
cout << "Run times" << endl;
for(int i=0;i<runs;i++)
cout << time[i] << endl;
cout << "Kernel times" << endl;
for(int i=0;i<runs;i++)
cout << kernelTime[i] << endl;
return 0;
}
|
3d124d3f0dd3def657e45661684d72ffba5accb0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgeellrtmv.cu, normal z -> d, Mon Jun 25 18:24:24 2018
*/
#include "magmasparse_internal.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_32(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * dx,
double beta,
double * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T; // thread index within the group of T threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if (i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 16 ) {
shared[idb] += shared[idb+16];
if ( idp < 8 ) shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_16(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * dx,
double beta,
double * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T; // thread index within the group of T threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if (i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 8 ) {
shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_8(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * dx,
double beta,
double * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T; // thread index within the group of T threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if (i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 4 ) {
shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows
@param[in]
n magma_int_t
number of columns
@param[in]
nnz_per_row magma_int_t
max number of nonzeros in a row
@param[in]
alpha double
scalar alpha
@param[in]
dval magmaDouble_ptr
val array
@param[in]
dcolind magmaIndex_ptr
col indices
@param[in]
drowlength magmaIndex_ptr
number of elements in each row
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar beta
@param[out]
dy magmaDouble_ptr
output vector y
@param[in]
blocksize magma_int_t
threads per block
@param[in]
alignment magma_int_t
threads assigned to each row
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgeellrtmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_int_t alignment,
magma_int_t blocksize,
magma_queue_t queue )
{
int num_blocks = magma_ceildiv( m, blocksize );
magma_int_t num_threads = alignment*blocksize;
magma_int_t threads = alignment*blocksize;
int real_row_length = magma_roundup( nnz_per_row, alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( double( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = alignment * blocksize * sizeof( double );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads, Ms);
if ( alignment == 32 ) {
hipLaunchKernelGGL(( dgeellrtmv_kernel_32), dim3(grid), dim3(threads), Ms, queue->cuda_stream() ,
m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 16 ) {
hipLaunchKernelGGL(( dgeellrtmv_kernel_16), dim3(grid), dim3(threads), Ms, queue->cuda_stream() ,
m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 8 ) {
hipLaunchKernelGGL(( dgeellrtmv_kernel_8), dim3(grid), dim3(threads), Ms, queue->cuda_stream() ,
m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
return MAGMA_SUCCESS;
}
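/*
    Illustrative usage (a sketch, not part of the original source; assumes the
    device arrays dval, dcolind, drowlength, dx, dy are already populated in
    ELLRT format and queue was created with magma_queue_create):

        magma_dgeellrtmv( MagmaNoTrans, m, n, nnz_per_row,
                          2.0, dval, dcolind, drowlength,
                          dx, 0.5, dy,
                          32, 32, queue );

    computes y = 2*A*x + 0.5*y. Here alignment = 32 selects
    dgeellrtmv_kernel_32, and blocksize = 32 yields 32*32 = 1024 threads and
    32*32*sizeof(double) = 8 KB of shared memory per block.
*/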
|
3d124d3f0dd3def657e45661684d72ffba5accb0.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgeellrtmv.cu, normal z -> d, Mon Jun 25 18:24:24 2018
*/
#include "magmasparse_internal.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_32(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * dx,
double beta,
double * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T; // thread index within the group of T threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if (i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 16 ) {
shared[idb] += shared[idb+16];
if ( idp < 8 ) shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_16(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * dx,
double beta,
double * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T; // thread index within the group of T threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if (i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 8 ) {
shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_8(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * dx,
double beta,
double * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T; // thread index within the group of T threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if (i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 4 ) {
shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows
@param[in]
n magma_int_t
number of columns
@param[in]
nnz_per_row magma_int_t
max number of nonzeros in a row
@param[in]
alpha double
scalar alpha
@param[in]
dval magmaDouble_ptr
val array
@param[in]
dcolind magmaIndex_ptr
col indices
@param[in]
drowlength magmaIndex_ptr
number of elements in each row
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar beta
@param[out]
dy magmaDouble_ptr
output vector y
@param[in]
blocksize magma_int_t
threads per block
@param[in]
alignment magma_int_t
threads assigned to each row
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgeellrtmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_int_t alignment,
magma_int_t blocksize,
magma_queue_t queue )
{
int num_blocks = magma_ceildiv( m, blocksize );
magma_int_t num_threads = alignment*blocksize;
magma_int_t threads = alignment*blocksize;
int real_row_length = magma_roundup( nnz_per_row, alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( double( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = alignment * blocksize * sizeof( double );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads, Ms);
if ( alignment == 32 ) {
dgeellrtmv_kernel_32<<< grid, threads, Ms, queue->cuda_stream() >>>
( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 16 ) {
dgeellrtmv_kernel_16<<< grid, threads, Ms, queue->cuda_stream() >>>
( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 8 ) {
dgeellrtmv_kernel_8<<< grid, threads, Ms, queue->cuda_stream() >>>
( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
return MAGMA_SUCCESS;
}
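/*
    Illustrative usage (a sketch, not part of the original source; assumes the
    device arrays dval, dcolind, drowlength, dx, dy are already populated in
    ELLRT format and queue was created with magma_queue_create):

        magma_dgeellrtmv( MagmaNoTrans, m, n, nnz_per_row,
                          2.0, dval, dcolind, drowlength,
                          dx, 0.5, dy,
                          32, 32, queue );

    computes y = 2*A*x + 0.5*y. Here alignment = 32 selects
    dgeellrtmv_kernel_32, and blocksize = 32 yields 32*32 = 1024 threads and
    32*32*sizeof(double) = 8 KB of shared memory per block.
*/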
|
eb6a204b1fc4fbb5e5fa5f725a4fc1bd9829a89e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*/
#include "../../common.hpp"
#include "ptx.cuh"
__global__ void sequence_gpu(int *d_ptr, int length)
{
int elemID = blockIdx.x * blockDim.x + threadIdx.x;
if (elemID < length)
{
d_ptr[elemID] = ptx::special_registers::laneid();
}
}
void sequence_cpu(int *h_ptr, int length)
{
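    // reference values: with a 1-D launch whose block size is a multiple of 32, the lane id
    // of thread elemID equals elemID % warp_size, which is what the GPU kernel reports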
for (int elemID=0; elemID<length; elemID++)
{
h_ptr[elemID] = elemID % cuda::warp_size;
}
}
int main(int, char **)
{
if (cuda::device::count() == 0) {
die_("No CUDA devices on this system");
}
const int N = 1000;
cuda::device::current::set_to_default();
auto device = cuda::device::current::get();
auto d_ptr = cuda::memory::device::make_unique<int[]>(device, N);
auto h_ptr = cuda::memory::host::make_unique<int[]>(N);
std::cout << "Generating data on CPU\n";
sequence_cpu(h_ptr.get(), N);
auto block_size = 256;
auto grid_size = div_rounding_up(N, block_size);
auto launch_config = cuda::make_launch_config(grid_size, block_size);
device.launch(sequence_gpu, launch_config, d_ptr.get(), N);
cuda::outstanding_error::ensure_none();
device.synchronize();
auto h_d_ptr = cuda::memory::host::make_unique<int[]>(N);
cuda::memory::copy(h_d_ptr.get(), d_ptr.get(), N * sizeof(int));
auto results_are_correct = std::equal(h_ptr.get(), h_ptr.get() + N, h_d_ptr.get());
if (not results_are_correct) {
die_("Results check failed.");
}
std::cout << "SUCCESS\n";
}
|
eb6a204b1fc4fbb5e5fa5f725a4fc1bd9829a89e.cu
|
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*/
#include "../../common.hpp"
#include "ptx.cuh"
__global__ void sequence_gpu(int *d_ptr, int length)
{
int elemID = blockIdx.x * blockDim.x + threadIdx.x;
if (elemID < length)
{
d_ptr[elemID] = ptx::special_registers::laneid();
}
}
void sequence_cpu(int *h_ptr, int length)
{
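    // reference values: with a 1-D launch whose block size is a multiple of 32, the lane id
    // of thread elemID equals elemID % warp_size, which is what the GPU kernel reports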
for (int elemID=0; elemID<length; elemID++)
{
h_ptr[elemID] = elemID % cuda::warp_size;
}
}
int main(int, char **)
{
if (cuda::device::count() == 0) {
die_("No CUDA devices on this system");
}
const int N = 1000;
cuda::device::current::set_to_default();
auto device = cuda::device::current::get();
auto d_ptr = cuda::memory::device::make_unique<int[]>(device, N);
auto h_ptr = cuda::memory::host::make_unique<int[]>(N);
std::cout << "Generating data on CPU\n";
sequence_cpu(h_ptr.get(), N);
auto block_size = 256;
auto grid_size = div_rounding_up(N, block_size);
auto launch_config = cuda::make_launch_config(grid_size, block_size);
device.launch(sequence_gpu, launch_config, d_ptr.get(), N);
cuda::outstanding_error::ensure_none();
device.synchronize();
auto h_d_ptr = cuda::memory::host::make_unique<int[]>(N);
cuda::memory::copy(h_d_ptr.get(), d_ptr.get(), N * sizeof(int));
auto results_are_correct = std::equal(h_ptr.get(), h_ptr.get() + N, h_d_ptr.get());
if (not results_are_correct) {
die_("Results check failed.");
}
std::cout << "SUCCESS\n";
}
|
fd893732e1b9a5271fb02de7b0ab08c908815ff4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/nvgraph_convert.hxx"
#include "include/nvgraph_error.hxx"
namespace nvgraph{
void csr2coo( const int *csrSortedRowPtr,
int nnz, int m, int *cooRowInd, hipsparseIndexBase_t idxBase){
CHECK_CUSPARSE( hipsparseXcsr2coo( Cusparse::get_handle(),
csrSortedRowPtr, nnz, m, cooRowInd, idxBase ));
}
void coo2csr( const int *cooRowInd,
int nnz, int m, int *csrSortedRowPtr, hipsparseIndexBase_t idxBase){
CHECK_CUSPARSE( hipsparseXcoo2csr( Cusparse::get_handle(),
cooRowInd, nnz, m, csrSortedRowPtr, idxBase ));
}
void csr2csc( int m, int n, int nnz,
const void *csrVal, const int *csrRowPtr, const int *csrColInd,
void *cscVal, int *cscRowInd, int *cscColPtr,
hipsparseAction_t copyValues, hipsparseIndexBase_t idxBase,
hipDataType *dataType){
CHECK_CUSPARSE( cusparseCsr2cscEx( Cusparse::get_handle(),
m, n, nnz,
csrVal, *dataType, csrRowPtr, csrColInd,
cscVal, *dataType, cscRowInd, cscColPtr,
copyValues, idxBase, *dataType ));
}
void csc2csr( int m, int n, int nnz,
const void *cscVal, const int *cscRowInd, const int *cscColPtr,
void *csrVal, int *csrRowPtr, int *csrColInd,
hipsparseAction_t copyValues, hipsparseIndexBase_t idxBase,
hipDataType *dataType){
CHECK_CUSPARSE( cusparseCsr2cscEx( Cusparse::get_handle(),
m, n, nnz,
cscVal, *dataType, cscColPtr, cscRowInd,
csrVal, *dataType, csrColInd, csrRowPtr,
copyValues, idxBase, *dataType ));
}
void cooSortByDestination(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColInd,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
size_t pBufferSizeInBytes = 0;
std::shared_ptr<char> pBuffer;
std::shared_ptr<int> P; // permutation array
// step 0: copy src to dst
if(dstRowInd!=srcRowInd)
CHECK_CUDA( hipMemcpy(dstRowInd, srcRowInd, nnz*sizeof(int), hipMemcpyDefault) );
if(dstColInd!=srcColInd)
CHECK_CUDA( hipMemcpy(dstColInd, srcColInd, nnz*sizeof(int), hipMemcpyDefault) );
// step 1: allocate buffer (needed for the COO sort)
cooSortBufferSize(m, n, nnz, dstRowInd, dstColInd, &pBufferSizeInBytes);
pBuffer = allocateDevice<char>(pBufferSizeInBytes, NULL);
// step 2: setup permutation vector P to identity
P = allocateDevice<int>(nnz, NULL);
createIdentityPermutation(nnz, P.get());
// step 3: sort COO format by column (destination index)
cooGetDestinationPermutation(m, n, nnz, dstRowInd, dstColInd, P.get(), pBuffer.get());
// step 4: gather sorted cooVals
gthrX(nnz, srcVal, dstVal, P.get(), idxBase, dataType);
}
void cooSortBySource(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColInd,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
size_t pBufferSizeInBytes = 0;
std::shared_ptr<char> pBuffer;
std::shared_ptr<int> P; // permutation array
// step 0: copy src to dst
CHECK_CUDA( hipMemcpy(dstRowInd, srcRowInd, nnz*sizeof(int), hipMemcpyDefault) );
CHECK_CUDA( hipMemcpy(dstColInd, srcColInd, nnz*sizeof(int), hipMemcpyDefault) );
// step 1: allocate buffer (needed for cooSortByRow)
cooSortBufferSize(m, n, nnz, dstRowInd, dstColInd, &pBufferSizeInBytes);
pBuffer = allocateDevice<char>(pBufferSizeInBytes, NULL);
// step 2: setup permutation vector P to identity
P = allocateDevice<int>(nnz, NULL);
createIdentityPermutation(nnz, P.get());
// step 3: sort COO format by Row
cooGetSourcePermutation(m, n, nnz, dstRowInd, dstColInd, P.get(), pBuffer.get());
// step 4: gather sorted cooVals
gthrX(nnz, srcVal, dstVal, P.get(), idxBase, dataType);
}
void coos2csc(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColPtr,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
// coos -> cood -> csc
std::shared_ptr<int> tmp = allocateDevice<int>(nnz, NULL);
cooSortByDestination(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, dstRowInd, tmp.get(), idxBase, dataType);
coo2csr(tmp.get(), nnz, m, dstColPtr, idxBase);
}
void cood2csr(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowPtr, int *dstColInd,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
// cood -> coos -> csr
std::shared_ptr<int> tmp = allocateDevice<int>(nnz, NULL);
cooSortBySource(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, tmp.get(), dstColInd, idxBase, dataType);
coo2csr(tmp.get(), nnz, m, dstRowPtr, idxBase);
}
void coou2csr(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowPtr, int *dstColInd,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
cood2csr(m, n, nnz,
srcVal, srcRowInd, srcColInd,
dstVal, dstRowPtr, dstColInd,
idxBase, dataType);
}
void coou2csc(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColPtr,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
coos2csc(m, n, nnz,
srcVal, srcRowInd, srcColInd,
dstVal, dstRowInd, dstColPtr,
idxBase, dataType);
}
////////////////////////// Utility functions //////////////////////////
void createIdentityPermutation(int n, int *p){
CHECK_CUSPARSE( hipsparseCreateIdentityPermutation(Cusparse::get_handle(), n, p) );
}
void gthrX( int nnz, const void *y, void *xVal, const int *xInd,
hipsparseIndexBase_t idxBase, hipDataType *dataType){
if(*dataType==HIP_R_32F){
CHECK_CUSPARSE( hipsparseSgthr(Cusparse::get_handle(), nnz, (float*)y, (float*)xVal, xInd, idxBase ));
} else if(*dataType==HIP_R_64F) {
CHECK_CUSPARSE( hipsparseDgthr(Cusparse::get_handle(), nnz, (double*)y, (double*)xVal, xInd, idxBase ));
}
}
void cooSortBufferSize(int m, int n, int nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes) {
CHECK_CUSPARSE( hipsparseXcoosort_bufferSizeExt( Cusparse::get_handle(),
m, n, nnz,
cooRows, cooCols, pBufferSizeInBytes ));
}
void cooGetSourcePermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer) {
CHECK_CUSPARSE( hipsparseXcoosortByRow( Cusparse::get_handle(),
m, n, nnz,
cooRows, cooCols, p, pBuffer ));
}
void cooGetDestinationPermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer) {
CHECK_CUSPARSE( hipsparseXcoosortByColumn( Cusparse::get_handle(),
m, n, nnz,
cooRows, cooCols, p, pBuffer ));
}
} //end namespace nvgraph
|
fd893732e1b9a5271fb02de7b0ab08c908815ff4.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/nvgraph_convert.hxx"
#include "include/nvgraph_error.hxx"
namespace nvgraph{
void csr2coo( const int *csrSortedRowPtr,
int nnz, int m, int *cooRowInd, cusparseIndexBase_t idxBase){
CHECK_CUSPARSE( cusparseXcsr2coo( Cusparse::get_handle(),
csrSortedRowPtr, nnz, m, cooRowInd, idxBase ));
}
void coo2csr( const int *cooRowInd,
int nnz, int m, int *csrSortedRowPtr, cusparseIndexBase_t idxBase){
CHECK_CUSPARSE( cusparseXcoo2csr( Cusparse::get_handle(),
cooRowInd, nnz, m, csrSortedRowPtr, idxBase ));
}
void csr2csc( int m, int n, int nnz,
const void *csrVal, const int *csrRowPtr, const int *csrColInd,
void *cscVal, int *cscRowInd, int *cscColPtr,
cusparseAction_t copyValues, cusparseIndexBase_t idxBase,
cudaDataType_t *dataType){
CHECK_CUSPARSE( cusparseCsr2cscEx( Cusparse::get_handle(),
m, n, nnz,
csrVal, *dataType, csrRowPtr, csrColInd,
cscVal, *dataType, cscRowInd, cscColPtr,
copyValues, idxBase, *dataType ));
}
void csc2csr( int m, int n, int nnz,
const void *cscVal, const int *cscRowInd, const int *cscColPtr,
void *csrVal, int *csrRowPtr, int *csrColInd,
cusparseAction_t copyValues, cusparseIndexBase_t idxBase,
cudaDataType_t *dataType){
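    // A CSC matrix is the CSR layout of its transpose, so CSC -> CSR is performed
    // by reusing cusparseCsr2cscEx with the row/column argument roles swapped.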
CHECK_CUSPARSE( cusparseCsr2cscEx( Cusparse::get_handle(),
m, n, nnz,
cscVal, *dataType, cscColPtr, cscRowInd,
csrVal, *dataType, csrColInd, csrRowPtr,
copyValues, idxBase, *dataType ));
}
void cooSortByDestination(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColInd,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
size_t pBufferSizeInBytes = 0;
std::shared_ptr<char> pBuffer;
std::shared_ptr<int> P; // permutation array
// step 0: copy src to dst
if(dstRowInd!=srcRowInd)
CHECK_CUDA( cudaMemcpy(dstRowInd, srcRowInd, nnz*sizeof(int), cudaMemcpyDefault) );
if(dstColInd!=srcColInd)
CHECK_CUDA( cudaMemcpy(dstColInd, srcColInd, nnz*sizeof(int), cudaMemcpyDefault) );
// step 1: allocate buffer (needed for the COO sort)
cooSortBufferSize(m, n, nnz, dstRowInd, dstColInd, &pBufferSizeInBytes);
pBuffer = allocateDevice<char>(pBufferSizeInBytes, NULL);
// step 2: setup permutation vector P to identity
P = allocateDevice<int>(nnz, NULL);
createIdentityPermutation(nnz, P.get());
// step 3: sort COO format by column (destination index)
cooGetDestinationPermutation(m, n, nnz, dstRowInd, dstColInd, P.get(), pBuffer.get());
// step 4: gather sorted cooVals
gthrX(nnz, srcVal, dstVal, P.get(), idxBase, dataType);
}
void cooSortBySource(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColInd,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
size_t pBufferSizeInBytes = 0;
std::shared_ptr<char> pBuffer;
std::shared_ptr<int> P; // permutation array
// step 0: copy src to dst
CHECK_CUDA( cudaMemcpy(dstRowInd, srcRowInd, nnz*sizeof(int), cudaMemcpyDefault) );
CHECK_CUDA( cudaMemcpy(dstColInd, srcColInd, nnz*sizeof(int), cudaMemcpyDefault) );
// step 1: allocate buffer (needed for cooSortByRow)
cooSortBufferSize(m, n, nnz, dstRowInd, dstColInd, &pBufferSizeInBytes);
pBuffer = allocateDevice<char>(pBufferSizeInBytes, NULL);
// step 2: setup permutation vector P to identity
P = allocateDevice<int>(nnz, NULL);
createIdentityPermutation(nnz, P.get());
// step 3: sort COO format by Row
cooGetSourcePermutation(m, n, nnz, dstRowInd, dstColInd, P.get(), pBuffer.get());
// step 4: gather sorted cooVals
gthrX(nnz, srcVal, dstVal, P.get(), idxBase, dataType);
}
void coos2csc(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColPtr,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
// coos -> cood -> csc
std::shared_ptr<int> tmp = allocateDevice<int>(nnz, NULL);
cooSortByDestination(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, dstRowInd, tmp.get(), idxBase, dataType);
coo2csr(tmp.get(), nnz, m, dstColPtr, idxBase);
}
void cood2csr(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowPtr, int *dstColInd,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
// cood -> coos -> csr
std::shared_ptr<int> tmp = allocateDevice<int>(nnz, NULL);
cooSortBySource(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, tmp.get(), dstColInd, idxBase, dataType);
coo2csr(tmp.get(), nnz, m, dstRowPtr, idxBase);
}
void coou2csr(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowPtr, int *dstColInd,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
cood2csr(m, n, nnz,
srcVal, srcRowInd, srcColInd,
dstVal, dstRowPtr, dstColInd,
idxBase, dataType);
}
void coou2csc(int m, int n, int nnz,
const void *srcVal, const int *srcRowInd, const int *srcColInd,
void *dstVal, int *dstRowInd, int *dstColPtr,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
coos2csc(m, n, nnz,
srcVal, srcRowInd, srcColInd,
dstVal, dstRowInd, dstColPtr,
idxBase, dataType);
}
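// Usage sketch (hypothetical caller; d_val/d_row/d_col are assumed to already hold
// an unsorted COO matrix on the device, with CSR output buffers preallocated):
//   cudaDataType_t dtype = CUDA_R_32F;
//   coou2csr(m, n, nnz,
//            d_val, d_row, d_col,                 // unsorted COO input
//            d_csrVal, d_csrRowPtr, d_csrColInd,  // CSR output
//            CUSPARSE_INDEX_BASE_ZERO, &dtype);
// The sort scratch buffer and permutation array are allocated and released inside
// cooSortBySource(), so the caller only supplies the matrix buffers.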
////////////////////////// Utility functions //////////////////////////
void createIdentityPermutation(int n, int *p){
CHECK_CUSPARSE( cusparseCreateIdentityPermutation(Cusparse::get_handle(), n, p) );
}
void gthrX( int nnz, const void *y, void *xVal, const int *xInd,
cusparseIndexBase_t idxBase, cudaDataType_t *dataType){
if(*dataType==CUDA_R_32F){
CHECK_CUSPARSE( cusparseSgthr(Cusparse::get_handle(), nnz, (float*)y, (float*)xVal, xInd, idxBase ));
} else if(*dataType==CUDA_R_64F) {
CHECK_CUSPARSE( cusparseDgthr(Cusparse::get_handle(), nnz, (double*)y, (double*)xVal, xInd, idxBase ));
}
}
void cooSortBufferSize(int m, int n, int nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes) {
CHECK_CUSPARSE( cusparseXcoosort_bufferSizeExt( Cusparse::get_handle(),
m, n, nnz,
cooRows, cooCols, pBufferSizeInBytes ));
}
void cooGetSourcePermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer) {
CHECK_CUSPARSE( cusparseXcoosortByRow( Cusparse::get_handle(),
m, n, nnz,
cooRows, cooCols, p, pBuffer ));
}
void cooGetDestinationPermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer) {
CHECK_CUSPARSE( cusparseXcoosortByColumn( Cusparse::get_handle(),
m, n, nnz,
cooRows, cooCols, p, pBuffer ));
}
} //end namespace nvgraph
|
83bc232f41f62988a65ef0b86eaffc7b41f06cd7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init_f32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vector = NULL;
hipMalloc(&vector, XSIZE*YSIZE);
float value = 2;
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
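// Round the matrix extents up to the next multiple of the block dimensions so the
// grid covers the whole matrix (e.g. XSIZE = 240 with BLOCKX = 32 becomes 256,
// i.e. 8 blocks along x).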
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(init_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, value, len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(init_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, value, len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(init_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, value, len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
83bc232f41f62988a65ef0b86eaffc7b41f06cd7.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init_f32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vector = NULL;
cudaMalloc(&vector, XSIZE*YSIZE);
float value = 2;
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
init_f32<<<gridBlock,threadBlock>>>(vector,value,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
init_f32<<<gridBlock,threadBlock>>>(vector,value,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
init_f32<<<gridBlock,threadBlock>>>(vector,value,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
df34732b1cbe569549d6955088959090ea74bdda.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
__device__ void
serial_InitRefMatrix_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
if (bidx == 0) {
// lig loop, ~30
for (int l = 0; l < lna_dc; ++l) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int p = 0; p < pnp_dc; ++p) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
ref_matrix_dc->matrix[l][p] = (dst <= pmf0);
//ref_matrix_dc->matrix[l][p] = dst;
//printf("init %f %d %d\n", ref_matrix_dc->matrix[l][p], l, p);
} // prt loop
} // lig loop
// printf ("init lna %d pnp %d pnk %d pos %d\n", lna_dc, pnp_dc, pnk_dc, pos_dc);
}
}
*/
__device__ void
InitRefMatrix_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
ref_matrix_dc->matrix[l][p] = (dst <= pmf0);
}
} // prt loop
}
} // lig loop
}
/*
__device__ void
serial_CalcMcc_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
if (bidx == 0) {
float tp0 = 0.0f;
float tn0 = 0.0f;
float fp0 = 0.0f;
float fn0 = 0.0f;
// lig loop, ~30
for (int l = 0; l < lna_dc; ++l) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int p = 0; p < pnp_dc; ++p) {
//printf ("%d %d, ", l, p);
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
const int ref_val = ref_matrix_dc->matrix[l][p];
//if (dst != ref_val)
//printf("calc %f %f %d %d\n", dst, ref_val, l, p);
tp0 += (ref_val == 1 && dst <= pmf0);
fn0 += (ref_val == 1 && dst > pmf0);
fp0 += (ref_val == 0 && dst <= pmf0);
tn0 += (ref_val == 0 && dst > pmf0);
} // prt loop
} // lig loop
const float dividend = sqrtf ((tp0 + fp0) * (tp0 + fn0) * (tn0 + fp0) * (tn0 + fn0));
if (dividend != 0)
mylig->energy_new.cmcc = (tp0 * tn0 - fp0 * fn0) / dividend;
else
mylig->energy_new.cmcc = CMCC_INVALID_VAL;
// printf("%f %f %f %f %f\n", tp0, fn0, fp0, tn0, dividend);
// printf("cmcc %f\n", mylig->energy_new.cmcc);
//printf ("calc lna %d pnp %d pnk %d pos %d\n", lna_dc, pnp_dc, pnk_dc, pos_dc);
}
}
*/
__device__ void
CalcMcc_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce
__shared__ int tp[TperB];
__shared__ int fn[TperB];
__shared__ int fp[TperB];
__shared__ int tn[TperB];
tp[bidx] = 0;
fn[bidx] = 0;
fp[bidx] = 0;
tn[bidx] = 0;
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
const int ref_val = ref_matrix_dc->matrix[l][p];
tp[bidx] += (ref_val == 1 && dst <= pmf0);
fn[bidx] += (ref_val == 1 && dst > pmf0);
fp[bidx] += (ref_val == 0 && dst <= pmf0);
tn[bidx] += (ref_val == 0 && dst > pmf0);
}
} // prt loop
}
} // lig loop
SumReduction_int_1D_4_d (bidx, tp, fn, fp, tn);
if (bidx == 0) {
const float tp0 = (float) tp[0];
const float fn0 = (float) fn[0];
const float fp0 = (float) fp[0];
const float tn0 = (float) tn[0];
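// Matthews correlation coefficient:
//   MCC = (TP*TN - FP*FN) / sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
// guarded against a zero denominator below.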
const float dividend = sqrtf ((tp0 + fp0) * (tp0 + fn0) * (tn0 + fp0) * (tn0 + fn0));
if (dividend != 0)
mylig->energy_new.cmcc = (tp0 * tn0 - fp0 * fn0) / dividend;
else
mylig->energy_new.cmcc = CMCC_INVALID_VAL;
// printf("%f %f %f %f %f\n", tp0, fn0, fp0, tn0, dividend);
// printf("cmcc %f\n", mylig->energy_new.cmcc);
}
}
|
df34732b1cbe569549d6955088959090ea74bdda.cu
|
/*
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
__device__ void
serial_InitRefMatrix_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
if (bidx == 0) {
// lig loop, ~30
for (int l = 0; l < lna_dc; ++l) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int p = 0; p < pnp_dc; ++p) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
ref_matrix_dc->matrix[l][p] = (dst <= pmf0);
//ref_matrix_dc->matrix[l][p] = dst;
//printf("init %f %d %d\n", ref_matrix_dc->matrix[l][p], l, p);
} // prt loop
} // lig loop
// printf ("init lna %d pnp %d pnk %d pos %d\n", lna_dc, pnp_dc, pnk_dc, pos_dc);
}
}
*/
__device__ void
InitRefMatrix_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
ref_matrix_dc->matrix[l][p] = (dst <= pmf0);
}
} // prt loop
}
} // lig loop
}
/*
__device__ void
serial_CalcMcc_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
if (bidx == 0) {
float tp0 = 0.0f;
float tn0 = 0.0f;
float fp0 = 0.0f;
float fn0 = 0.0f;
// lig loop, ~30
for (int l = 0; l < lna_dc; ++l) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int p = 0; p < pnp_dc; ++p) {
//printf ("%d %d, ", l, p);
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
const int ref_val = ref_matrix_dc->matrix[l][p];
//if (dst != ref_val)
//printf("calc %f %f %d %d\n", dst, ref_val, l, p);
tp0 += (ref_val == 1 && dst <= pmf0);
fn0 += (ref_val == 1 && dst > pmf0);
fp0 += (ref_val == 0 && dst <= pmf0);
tn0 += (ref_val == 0 && dst > pmf0);
} // prt loop
} // lig loop
const float dividend = sqrtf ((tp0 + fp0) * (tp0 + fn0) * (tn0 + fp0) * (tn0 + fn0));
if (dividend != 0)
mylig->energy_new.cmcc = (tp0 * tn0 - fp0 * fn0) / dividend;
else
mylig->energy_new.cmcc = CMCC_INVALID_VAL;
// printf("%f %f %f %f %f\n", tp0, fn0, fp0, tn0, dividend);
// printf("cmcc %f\n", mylig->energy_new.cmcc);
//printf ("calc lna %d pnp %d pnk %d pos %d\n", lna_dc, pnp_dc, pnk_dc, pos_dc);
}
}
*/
__device__ void
CalcMcc_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce
__shared__ int tp[TperB];
__shared__ int fn[TperB];
__shared__ int fp[TperB];
__shared__ int tn[TperB];
tp[bidx] = 0;
fn[bidx] = 0;
fp[bidx] = 0;
tn[bidx] = 0;
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst = sqrtf (dx * dx + dy * dy + dz * dz);
const float pmf0 = enepara_dc->pmf0[lig_t][prt_t];
const int ref_val = ref_matrix_dc->matrix[l][p];
tp[bidx] += (ref_val == 1 && dst <= pmf0);
fn[bidx] += (ref_val == 1 && dst > pmf0);
fp[bidx] += (ref_val == 0 && dst <= pmf0);
tn[bidx] += (ref_val == 0 && dst > pmf0);
}
} // prt loop
}
} // lig loop
SumReduction_int_1D_4_d (bidx, tp, fn, fp, tn);
if (bidx == 0) {
const float tp0 = (float) tp[0];
const float fn0 = (float) fn[0];
const float fp0 = (float) fp[0];
const float tn0 = (float) tn[0];
const float dividend = sqrtf ((tp0 + fp0) * (tp0 + fn0) * (tn0 + fp0) * (tn0 + fn0));
if (dividend != 0)
mylig->energy_new.cmcc = (tp0 * tn0 - fp0 * fn0) / dividend;
else
mylig->energy_new.cmcc = CMCC_INVALID_VAL;
// printf("%f %f %f %f %f\n", tp0, fn0, fp0, tn0, dividend);
// printf("cmcc %f\n", mylig->energy_new.cmcc);
}
}
|
7fb4c435d4262b99fe5c9d342764fc6e83832b09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
int n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + K - 1; // K=7, tile_width = 16
__shared__ float X_shared[TILE_WIDTH+6][TILE_WIDTH+6];
__shared__ float K_shared[7][7];
//extern __shared__ float shmem[];//input tile and filter
//float* X_shared = &shmem[0]; //pointer to input tile
//float* K_shared = &shmem[X_tile_width * X_tile_width]; //pointer to filter
int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width
n = blockIdx.x; //current image in the batch
m = blockIdx.y; //current output feature map
w0 = threadIdx.x; //column (width) offset of the thread within the output tile
h0 = threadIdx.y; //row (height) offset of the thread within the output tile
//h_base = (blockIdx.z / W_grid) * X_tile_width; // vertical base out data index for the block
//w_base = (blockIdx.z % W_grid) * X_tile_width; // horizontal base out data index for the block
h_base = (blockIdx.z / W_grid) * TILE_WIDTH; // vertical base out data index for the block
w_base = (blockIdx.z % W_grid) * TILE_WIDTH; // horizontal base out data index for the block
h = h_base + h0;
w = w_base + w0;
float acc = 0.;
int c, p, q;
if(h < H_out && w < W_out){
for (c = 0; c < C; c++) { // sum over all input channels
// load weights for W [m, c,..],
// w0 and h0 used as shorthand for threadIdx.x
// and threadIdx.y
// load tile from X[n, c, ...] into shared memory
if (( h0 < K) && ( w0 < K)){
K_shared[h0][w0]= k4d(m, c, h0, w0);
}
__syncthreads();
for (int i = h; i < h_base + X_tile_width; i += TILE_WIDTH) {
for (int j = w; j < w_base + X_tile_width; j += TILE_WIDTH){
// if(i < H_out && j < W_out){
// X_shared[i - h_base][j - w_base] = x4d(n, c, h, w);
// }else{
// X_shared[i - h_base][j - w_base] = 0.0f;
// }
if(i < H && j < W){
X_shared[i - h_base][j - w_base] = x4d(n, c, i, j);
}else{
X_shared[i - h_base][j - w_base] = 0.0f;
}
}
}
__syncthreads();
for (p = 0; p < K; p++) {
for (q = 0; q < K; q++){
acc = acc + X_shared[h0+p][w0+q] * K_shared[p][q];
//acc = acc + X_shared[(h + p)*TILE_WIDTH+w+q] * K_shared[p*K+q];
}
}
__syncthreads();
}
y4d(n, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
|
7fb4c435d4262b99fe5c9d342764fc6e83832b09.cu
|
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
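// Host-side launch sketch (illustrative only; TILE_WIDTH and the device pointers
// are assumed to be set up by the caller):
//   const int H_out = H - K + 1, W_out = W - K + 1;
//   const int W_grid = (W_out + TILE_WIDTH - 1) / TILE_WIDTH;
//   const int H_grid = (H_out + TILE_WIDTH - 1) / TILE_WIDTH;
//   dim3 block(TILE_WIDTH, TILE_WIDTH, 1);
//   dim3 grid(B, M, H_grid * W_grid);  // blockIdx.x = image, .y = output map, .z = tile
//   conv_forward_kernel<<<grid, block>>>(y, x, k, B, M, C, H, W, K);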
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
int n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + K - 1; // K=7, tile_width = 16
__shared__ float X_shared[TILE_WIDTH+6][TILE_WIDTH+6];
__shared__ float K_shared[7][7];
//extern __shared__ float shmem[];//input tile and filter
//float* X_shared = &shmem[0]; //pointer to input tile
//float* K_shared = &shmem[X_tile_width * X_tile_width]; //pointer to filter
int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width
n = blockIdx.x; //current image in the batch
m = blockIdx.y; //current output feature map
w0 = threadIdx.x; //column (width) offset of the thread within the output tile
h0 = threadIdx.y; //row (height) offset of the thread within the output tile
//h_base = (blockIdx.z / W_grid) * X_tile_width; // vertical base out data index for the block
//w_base = (blockIdx.z % W_grid) * X_tile_width; // horizontal base out data index for the block
h_base = (blockIdx.z / W_grid) * TILE_WIDTH; // vertical base out data index for the block
w_base = (blockIdx.z % W_grid) * TILE_WIDTH; // horizontal base out data index for the block
h = h_base + h0;
w = w_base + w0;
float acc = 0.;
int c, p, q;
if(h < H_out && w < W_out){
for (c = 0; c < C; c++) { // sum over all input channels
// load weights for W [m, c,..],
// w0 and h0 used as shorthand for threadIdx.x
// and threadIdx.y
// load tile from X[n, c,…] into shared memory
if (( h0 < K) && ( w0 < K)){
K_shared[h0][w0]= k4d(m, c, h0, w0);
}
__syncthreads();
for (int i = h; i < h_base + X_tile_width; i += TILE_WIDTH) {
for (int j = w; j < w_base + X_tile_width; j += TILE_WIDTH){
// if(i < H_out && j < W_out){
// X_shared[i - h_base][j - w_base] = x4d(n, c, h, w);
// }else{
// X_shared[i - h_base][j - w_base] = 0.0f;
// }
if(i < H && j < W){
X_shared[i - h_base][j - w_base] = x4d(n, c, i, j);
}else{
X_shared[i - h_base][j - w_base] = 0.0f;
}
}
}
__syncthreads();
for (p = 0; p < K; p++) {
for (q = 0; q < K; q++){
acc = acc + X_shared[h0+p][w0+q] * K_shared[p][q];
//acc = acc + X_shared[(h + p)*TILE_WIDTH+w+q] * K_shared[p*K+q];
}
}
__syncthreads();
}
y4d(n, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
|
be92154f5fe38e1c34c51ac40760417351ab7208.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "go_ion.cuh"
//Na channel
__host__ __device__ static double alpha_m_NaT(const double v) { return 0.417*(v + 25.0) / (1 - exp(-(v + 25.0) / 10.0)); }
__host__ __device__ static double beta_m_NaT(const double v) { return 16.68*exp(-0.055*(v + 50.0)); }
__host__ __device__ static double inf_m_NaT(const double v) { return alpha_m_NaT(v) / (alpha_m_NaT(v) + beta_m_NaT(v)); }
__host__ __device__ static double tau_m_NaT(const double v) { return 1.0 / (alpha_m_NaT(v) + beta_m_NaT(v)); }
__host__ __device__ static double alpha_h_NaT(const double v) { return 0.292*exp(-0.3*(v + 50.0)); }
__host__ __device__ static double beta_h_NaT(const double v) { return 4.17 / (1 + exp(-(v + 17.0) / 5.0)); }
__host__ __device__ static double inf_h_NaT(const double v) { return alpha_h_NaT(v) / (alpha_h_NaT(v) + beta_h_NaT(v)); }
__host__ __device__ static double tau_h_NaT(const double v) { return 1.0 / (alpha_h_NaT(v) + beta_h_NaT(v)); }
__host__ __device__ static double alpha_r_NaR(const double v) { return ((1.11 - 68.5*(v - 4.48) / (exp(-(v - 4.48) / 6.8) - 1.0))*1.0e-4); }
__host__ __device__ static double beta_r_NaR(const double v) { double x = (v + 44.0) / 0.11; if (x > 200.0)x = 200.0; return ((66.0 + 21.7*(v + 44) / (exp(x) - 1.0))*1.0e-3); }
__host__ __device__ static double inf_r_NaR(const double v) { return alpha_r_NaR(v) / (alpha_r_NaR(v) + beta_r_NaR(v)); }
__host__ __device__ static double tau_r_NaR(const double v) { return 1.0 / (alpha_r_NaR(v) + beta_r_NaR(v)); }
__host__ __device__ static double alpha_s_NaR(const double v) { return (0.443*exp(-(v + 80.0) / 62.5)); }
__host__ __device__ static double beta_s_NaR(const double v) { return (0.014*exp((v + 83.3) / 16.1)); }
__host__ __device__ static double inf_s_NaR(const double v) { return alpha_s_NaR(v) / (alpha_s_NaR(v) + beta_s_NaR(v)); }
__host__ __device__ static double tau_s_NaR(const double v) { return 1.0 / (alpha_s_NaR(v) + beta_s_NaR(v)); }
__host__ __device__ static double alpha_p_NaP(const double v) { return (0.421*(v + 40.0) / (1.0 - exp(-(v + 40.0) / 5.0))); }
__host__ __device__ static double beta_p_NaP(const double v) { return (-0.287*(v + 40.0) / (1.0 - exp((v + 40.0) / 5.0))); }
__host__ __device__ static double inf_p_NaP(const double v) { return (1 / (1.0 + exp(-(v + 43.0) / 5.0))); }
__host__ __device__ static double tau_p_NaP(const double v) { return (5.0 / (alpha_p_NaP(v) + beta_p_NaP(v))); }
//Ca channel
__host__ __device__ static double alpha_ch_CaHVA(const double v) { return (0.0687 * exp(0.063*(v + 29.0))); }
__host__ __device__ static double beta_ch_CaHVA(const double v) { return (0.115 * exp(-0.039*(v + 18.66))); }
__host__ __device__ static double inf_ch_CaHVA(const double v) { return (alpha_ch_CaHVA(v) / (alpha_ch_CaHVA(v) + beta_ch_CaHVA(v))); }
__host__ __device__ static double tau_ch_CaHVA(const double v) { return (1.0 / (alpha_ch_CaHVA(v) + beta_ch_CaHVA(v))); }
__host__ __device__ static double alpha_ci_CaHVA(const double v) { return (1.8e-3*exp(-(v + 48.0) / 18.0)); }
__host__ __device__ static double beta_ci_CaHVA(const double v) { return (1.8e-3*exp((v + 48.0) / 83.0)); }
__host__ __device__ static double inf_ci_CaHVA(const double v) { return (alpha_ci_CaHVA(v) / (alpha_ci_CaHVA(v) + beta_ci_CaHVA(v))); }
__host__ __device__ static double tau_ci_CaHVA(const double v) { return (1.0 / (alpha_ci_CaHVA(v) + beta_ci_CaHVA(v))); }
__host__ __device__ static double inf_cl_CaLVA(const double v) { return (1.0 / (1.0 + exp(-(v + 52.0) / 7.4))); }
__host__ __device__ static double tau_cl_CaLVA(const double v) { return ((3.0 + 1.0 / (exp((v + 27.0) / 10.0) + exp(-(v + 102.0) / 15.0))) / 0.85); }
__host__ __device__ static double inf_cm_CaLVA(const double v) { return (1.0 / (1.0 + exp((v + 80.0) / 5.0))); }
__host__ __device__ static double tau_cm_CaLVA(const double v) { return ((85.0 + 1.0 / (exp((v + 48.0) / 4.0) + exp(-(v + 407.0) / 50.0))) / 0.9); }
//K channel
__host__ __device__ static double alpha_n_KV(const double v) { return 0.062*(v + 26.0) / (1.0 - exp(-(v + 26.0) / 10.0)); }
__host__ __device__ static double beta_n_KV(const double v) { return 0.78*exp(-(v + 36.0) / 80.0); }
__host__ __device__ static double inf_n_KV(const double v) { return (alpha_n_KV(v) / (alpha_n_KV(v) + beta_n_KV(v))); }
__host__ __device__ static double tau_n_KV(const double v) { return (1.0 / (alpha_n_KV(v) + beta_n_KV(v))); }
__host__ __device__ static double alpha_a_KA(const double v) { return (0.62 / (1.0 + exp(-(v + 9.17) / 23.32))); }
__host__ __device__ static double beta_a_KA(const double v) { return (0.126 / (exp((v + 18.28) / 19.47))); }
__host__ __device__ static double inf_a_KA(const double v) { return (1.0 / (1.0 + exp(-(v + 38.0) / 17.0))); }
__host__ __device__ static double tau_a_KA(const double v) { return (1.0 / (alpha_a_KA(v) + beta_a_KA(v))); }
__host__ __device__ static double alpha_b_KA(const double v) { return (0.028 / (1.0 + exp((v + 111.0) / 12.84))); }
__host__ __device__ static double beta_b_KA(const double v) { return(0.026 / (1.0 + exp(-(v + 49.95) / 8.9))); }
__host__ __device__ static double inf_b_KA(const double v) { return (1.0 / (1.0 + exp((v + 78.8) / 8.4))); }
__host__ __device__ static double tau_b_KA(const double v) { return (1.0 / (alpha_b_KA(v) + beta_b_KA(v))); }
__host__ __device__ static double alpha_c_KC(const double v, const double ca) { return (3.2 / (1.0 + 0.0015*exp(-(v) / 11.7) / ca)); }
__host__ __device__ static double beta_c_KC(const double v, const double ca) { return (0.46 / (1.0 + ca / (1.5e-4*exp(-(v) / 11.7)))); }
__host__ __device__ static double inf_c_KC(const double v, const double ca) { return (alpha_c_KC(v, ca) / (alpha_c_KC(v, ca) + beta_c_KC(v, ca))); }
__host__ __device__ static double tau_c_KC(const double v, const double ca) { return (1.0 / (alpha_c_KC(v, ca) + beta_c_KC(v, ca))); }
__host__ __device__ static double alpha_sl_Kslow(const double v) { return (0.0037*exp((v + 30.0) / 40.0)); }
__host__ __device__ static double beta_sl_Kslow(const double v) { return (0.0037*exp(-(v + 30.0) / 20.0)); }
__host__ __device__ static double inf_sl_Kslow(const double v) { return (1.0 / (1.0 + exp(-(v + 35.0) / 6.0))); }
__host__ __device__ static double tau_sl_Kslow(const double v) { return (1.0 / (alpha_sl_Kslow(v) + beta_sl_Kslow(v))); }
__host__ __device__ static double r_HCN1(const double v) { return (0.0021 * (v) + 0.97); }
__host__ __device__ static double inf_hf_HCN1(const double v) { return (r_HCN1(v) * (1.0 / (1.0 + exp((v + 72.5)*0.11)))); }
__host__ __device__ static double inf_hs_HCN1(const double v) { return ((1.0 - r_HCN1(v)) * (1.0 / (1.0 + exp((v + 72.5)*0.11)))); }
__host__ __device__ static double tau_hf_HCN1(const double v) { return (exp((0.0137*v + 3.37)*2.3)); }
__host__ __device__ static double tau_hs_HCN1(const double v) { return (exp((0.0145*v + 4.06)*2.3)); }
__host__ __device__ static double r_HCN2(const double v) {
//return (-0.0227 * (v + 10.0) - 1.47);
if (v >= -64.70)return 0.0;
else if (v <= -108.70) return 1.0;
else return (-0.0227 * (v) - 1.47);
}
__host__ __device__ static double inf_hf_HCN2(const double v) { return (r_HCN2(v) * (1.0 / (1.0 + exp((v + 81.9)*0.16)))); }
__host__ __device__ static double inf_hs_HCN2(const double v) { return ((1.0 - r_HCN2(v)) * (1.0 / (1.0 + exp((v + 81.9)*0.16)))); }
__host__ __device__ static double tau_hf_HCN2(const double v) { return (exp((0.027*v + 5.6)*2.3)); }
__host__ __device__ static double tau_hs_HCN2(const double v) { return (exp((0.015*v + 5.3)*2.3)); }
__global__
void go_KAHP_update_2order ( const int n, const double *ca, double *ca_old,
double *o1, double *o2, double *c1, double *c2, double *c3, double *c4, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double i1, i2, i3, a1, a2, b1, b2;
i1 = 80e-3;
i2 = 80e-3;
i3 = 200e-3;
a1 = 1.0;
a2 = 100e-3;
b1 = 160e-3;
b2 = 1.2;
//double k1, k2, k3, k4;
//for (int i = 0; i < n * GO_COMP; i++) {
if ( id < n * GO_COMP )
{
double l_ca = ca_old [ id ];
ca_old [ id ] = ca [ id ];
double l_dt = 2.0 / DT;
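// Second-order (trapezoidal / Crank-Nicolson) step for the 6-state KAHP Markov
// scheme dX/dt = A(Ca)*X. With l_dt = 2/DT the update solves
//   (l_dt*I - A_new) * X_new = (l_dt*I + A_old) * X_old,
// where A_old (built from the previous Ca, vca_n below) is folded into the
// right-hand side vcb and A_new (current Ca, vca below) forms the system matrix.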
double d1 = 200 * l_ca / 3.0;
double d2 = 160 * l_ca / 3.0;
double d3 = 80 * l_ca / 3.0;
double vca_n [ 6 ] [ 6 ] = {
{ -d1 + l_dt, i1, 0.0, 0.0, 0.0, 0.0 },
{ d1, -d2 - i1 + l_dt, i2, 0.0, 0.0, 0.0 },
{ 0.0, d2, -(d3 + i2 + b1) + l_dt, i3, a1, 0.0 },
{ 0.0, 0.0, d3, -(i3 + b2) + l_dt, 0.0, a2 },
{ 0.0, 0.0, b1, 0.0, -a1 + l_dt, 0.0 },
{ 0.0, 0.0, 0.0, b2, 0.0, -a2 + l_dt }
};
double vc [ 6 ] = { c1 [ id ], c2 [ id ], c3 [ id ], c4 [ id ], o1 [ id ], o2 [ id ] };
double vcb [ 6 ] = { };
for ( int i = 0; i < 6; i++ )
{
for ( int j = 0; j < 6; j++ )
{
vcb [ i ] -= vc [ j ] * vca_n [ i ] [ j ];
}
}
l_ca = ca [ id ];
d1 = 200 * l_ca / 3.0;
d2 = 160 * l_ca / 3.0;
d3 = 80 * l_ca / 3.0;
double vca [ 6 ] [ 6 ] = {
{ -d1 - l_dt, i1, 0.0, 0.0, 0.0, 0.0 },
{ d1, -d2 - i1 - l_dt, i2, 0.0, 0.0, 0.0 },
{ 0.0, d2, -(d3 + i2 + b1) - l_dt, i3, a1, 0.0 },
{ 0.0, 0.0, d3, -(i3 + b2) - l_dt, 0.0, a2 },
{ 0.0, 0.0, b1, 0.0, -a1 - l_dt, 0.0 },
{ 0.0, 0.0, 0.0, b2, 0.0, -a2 - l_dt }
};
//////////////////// Pivot selection //////////////////
double temp;
for ( int i = 0; i < 5; i++ )
{
int pivot = i;
double p_max = fabs ( vca [ i ] [ i ] );
for ( int j = i + 1; j < 6; j++ )
{
if ( fabs ( vca [ j ] [ i ] ) > p_max )
{
pivot = j;
p_max = fabs ( vca [ j ] [ i ] );
}
}
// minimum pivot error
if ( fabs ( p_max ) < 1.0e-12 ) { printf ( "pivot error\n" ); return; }
if ( pivot != i ) // pivot exchange
{
for ( int j = i; j < 6; j++ )
{
temp = vca [ i ] [ j ];
vca [ i ] [ j ] = vca [ pivot ] [ j ];
vca [ pivot ] [ j ] = temp;
}
temp = vcb [ i ];
vcb [ i ] = vcb [ pivot ];
vcb [ pivot ] = temp;
}
//////////////////// Forward elimination //////////////////
for ( int j = i + 1; j < 6; j++ )
{
double w = vca [ j ] [ i ] / vca [ i ] [ i ];
vca [ j ] [ i ] = 0.0;
// Multiply the ith line by -a[j][i]/a[i][i] and add it to the jth line
for ( int k = i + 1; k < 6; k++ )
{
vca [ j ] [ k ] = vca [ j ] [ k ] - vca [ i ] [ k ] * w;
}
vcb [ j ] = vcb [ j ] - vcb [ i ] * w;
}
}
//////////////////// Backward elimination //////////////////
for ( int i = 6 - 1; i >= 0; i-- )
{
for( int j = i + 1; j < 6; j++)
{
vcb [ i ] = vcb [ i ] - vca [ i ] [ j ] * vcb [ j ];
vca [ i ] [ j ] = 0.0;
}
vcb [ i ] = vcb [ i ] / vca [ i ] [ i ];
vca [ i ] [ i ] = 1.0;
}
c1[id] = vcb[0];
c2[id] = vcb[1];
c3[id] = vcb[2];
c4[id] = vcb[3];
o1[id] = vcb[4];
o2[id] = vcb[5];
if ((o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] < 0.9999)
|| (o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] > 1.0001)) {
printf("KAHP error %.15f\n", o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id]); //break;
}
}
}
__global__
void go_KAHP_update ( const int n, const double *ca, double *o1, double *o2, double *c1, double *c2, double *c3, double *c4, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double i1, i2, i3, a1, a2, b1, b2;
i1 = 80e-3;
i2 = 80e-3;
i3 = 200e-3;
a1 = 1.0;
a2 = 100e-3;
b1 = 160e-3;
b2 = 1.2;
//double k1, k2, k3, k4;
//for (int i = 0; i < n * GO_COMP; i++) {
if ( id < n * GO_COMP )
{
double d1 = 200 * ca [ id ] / 3.0;
double d2 = 160 * ca [ id ] / 3.0;
double d3 = 80 * ca [ id ] / 3.0;
double vca [ 6 ] [ 6 ] = {
{ -d1, i1, 0.0, 0.0, 0.0, 0.0 },
{ d1, -d2 - i1, i2, 0.0, 0.0, 0.0 },
{ 0.0, d2, -(d3 + i2 + b1), i3, a1, 0.0 },
{ 0.0, 0.0, d3, -(i3 + b2), 0.0, a2 },
{ 0.0, 0.0, b1, 0.0, -a1, 0.0 },
{ 0.0, 0.0, 0.0, b2, 0.0, -a2 }
};
double vcb [ 6 ] = { c1 [ id ], c2 [ id ], c3 [ id ], c4 [ id ], o1 [ id ], o2 [ id ] };
for ( int i = 0; i < 6; i++ )
{
for ( int j = 0; j < 6; j++ ) { vca [ i ] [ j ] *= - DT; }
}
for ( int i = 0; i < 6; i++ ) { vca [ i ] [ i ] += 1.0; }
//////////////////// Pivot selection //////////////////
double temp;
for ( int i = 0; i < 5; i++ )
{
int pivot = i;
double p_max = fabs ( vca [ i ] [ i ] );
for ( int j = i + 1; j < 6; j++ )
{
if ( fabs ( vca [ j ] [ i ] ) > p_max )
{
pivot = j;
p_max = fabs ( vca [ j ] [ i ] );
}
}
// minimum pivot error
if ( fabs ( p_max ) < 1.0e-12 ) { printf ( "pivot error\n" ); return; }
if ( pivot != i ) // pivot exchange
{
for ( int j = i; j < 6; j++ )
{
temp = vca [ i ] [ j ];
vca [ i ] [ j ] = vca [ pivot ] [ j ];
vca [ pivot ] [ j ] = temp;
}
temp = vcb [ i ];
vcb [ i ] = vcb [ pivot ];
vcb [ pivot ] = temp;
}
//////////////////// Forward elimination //////////////////
for ( int j = i + 1; j < 6; j++ )
{
double w = vca [ j ] [ i ] / vca [ i ] [ i ];
vca [ j ] [ i ] = 0.0;
// Multiply the ith line by -a[j][i]/a[i][i] and add it to the jth line
for ( int k = i + 1; k < 6; k++ )
{
vca [ j ] [ k ] = vca [ j ] [ k ] - vca [ i ] [ k ] * w;
}
vcb [ j ] = vcb [ j ] - vcb [ i ] * w;
}
}
//////////////////// Backward elimination //////////////////
for ( int i = 6 - 1; i >= 0; i-- )
{
for( int j = i + 1; j < 6; j++)
{
vcb [ i ] = vcb [ i ] - vca [ i ] [ j ] * vcb [ j ];
vca [ i ] [ j ] = 0.0;
}
vcb [ i ] = vcb [ i ] / vca [ i ] [ i ];
vca [ i ] [ i ] = 1.0;
}
c1[id] = vcb[0];
c2[id] = vcb[1];
c3[id] = vcb[2];
c4[id] = vcb[3];
o1[id] = vcb[4];
o2[id] = vcb[5];
if ((o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] < 0.9999)
|| (o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] > 1.0001)) {
printf("KAHP error %.15f\n", o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id]); //break;
}
}
}
__global__ void go_update_ion_exp_imp ( neuron_t *d_go, neuron_solve_t *d_go_solve, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
double v_val = ( elem [ v ] [ id ] + d_go_solve -> vec [ cn_v_old ] [ id ] ) / 2.0;
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( d_go_solve -> vec [ cn_v_old ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( d_go_solve -> vec [ cn_v_old ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
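// Classical RK4 step for the calcium pool ODE
//   dCa/dt = -I_Ca / (2*F_GO*SHELL1_D_GO) - B_Ca1_GO*(Ca - Ca1_0_GO)
// with I_Ca held constant over the step; first the HVA pool, then the LVA pool below.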
double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
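// Nernst potential: E_Ca = (R*T / (z*F)) * ln([Ca]out / [Ca]in) with z = 2 and
// T = 296.15 K, scaled by 1e3 to millivolts.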
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
Ca_val = ( Ca_val + elem [ Ca ] [ id ] ) / 2.0;
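// Gating variables use the trapezoidal (Crank-Nicolson) discretisation of
//   dx/dt = (x_inf(V) - x) / tau(V)
// at the midpoint voltage v_val, which rearranges to
//   x_new = (2*DT*x_inf + (2*tau - DT)*x_old) / (2*tau + DT).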
ion [ m_NaT_go ] [ id ] = ( 2.0 * DT * inf_m_NaT ( v_val ) + ( 2.0 * tau_m_NaT ( v_val ) - DT ) * ion [ m_NaT_go ] [ id ] ) / ( 2.0 * tau_m_NaT ( v_val ) + DT );
ion [ h_NaT_go ] [ id ] = ( 2.0 * DT * inf_h_NaT ( v_val ) + ( 2.0 * tau_h_NaT ( v_val ) - DT ) * ion [ h_NaT_go ] [ id ] ) / ( 2.0 * tau_h_NaT ( v_val ) + DT );
ion [ r_NaR_go ] [ id ] = ( 2.0 * DT * inf_r_NaR ( v_val ) + ( 2.0 * tau_r_NaR ( v_val ) - DT ) * ion [ r_NaR_go ] [ id ] ) / ( 2.0 * tau_r_NaR ( v_val ) + DT );
ion [ s_NaR_go ] [ id ] = ( 2.0 * DT * inf_s_NaR ( v_val ) + ( 2.0 * tau_s_NaR ( v_val ) - DT ) * ion [ s_NaR_go ] [ id ] ) / ( 2.0 * tau_s_NaR ( v_val ) + DT );
ion [ p_NaP_go ] [ id ] = ( 2.0 * DT * inf_p_NaP ( v_val ) + ( 2.0 * tau_p_NaP ( v_val ) - DT ) * ion [ p_NaP_go ] [ id ] ) / ( 2.0 * tau_p_NaP ( v_val ) + DT );
ion [ n_KV_go ] [ id ] = ( 2.0 * DT * inf_n_KV ( v_val ) + ( 2.0 * tau_n_KV ( v_val ) - DT ) * ion [ n_KV_go ] [ id ] ) / ( 2.0 * tau_n_KV ( v_val ) + DT );
ion [ a_KA_go ] [ id ] = ( 2.0 * DT * inf_a_KA ( v_val ) + ( 2.0 * tau_a_KA ( v_val ) - DT ) * ion [ a_KA_go ] [ id ] ) / ( 2.0 * tau_a_KA ( v_val ) + DT );
ion [ b_KA_go ] [ id ] = ( 2.0 * DT * inf_b_KA ( v_val ) + ( 2.0 * tau_b_KA ( v_val ) - DT ) * ion [ b_KA_go ] [ id ] ) / ( 2.0 * tau_b_KA ( v_val ) + DT );
ion [ sl_Kslow_go ] [ id ] = ( 2.0 * DT * inf_sl_Kslow ( v_val ) + ( 2.0 * tau_sl_Kslow ( v_val ) - DT ) * ion [ sl_Kslow_go ] [ id ] ) / ( 2.0 * tau_sl_Kslow ( v_val ) + DT );
ion [ c_KC_go ] [ id ] = ( 2.0 * DT * inf_c_KC ( v_val, Ca_val ) + ( 2.0 * tau_c_KC ( v_val, Ca_val ) - DT ) * ion [ c_KC_go ] [ id ] ) / ( 2.0 * tau_c_KC ( v_val, Ca_val ) + DT );
ion [ ch_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ch_CaHVA ( v_val ) + ( 2.0 * tau_ch_CaHVA ( v_val ) - DT ) * ion [ ch_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ch_CaHVA ( v_val ) + DT );
ion [ ci_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ci_CaHVA ( v_val ) + ( 2.0 * tau_ci_CaHVA ( v_val ) - DT ) * ion [ ci_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ci_CaHVA ( v_val ) + DT );
ion [ cl_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cl_CaLVA ( v_val ) + ( 2.0 * tau_cl_CaLVA ( v_val ) - DT ) * ion [ cl_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cl_CaLVA ( v_val ) + DT );
ion [ cm_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cm_CaLVA ( v_val ) + ( 2.0 * tau_cm_CaLVA ( v_val ) - DT ) * ion [ cm_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cm_CaLVA ( v_val ) + DT );
ion [ hf_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN1 ( v_val ) + ( 2.0 * tau_hf_HCN1 ( v_val ) - DT ) * ion [ hf_HCN1_go ] [ id ] ) / ( 2.0 * tau_hf_HCN1 ( v_val ) + DT );
ion [ hf_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN2 ( v_val ) + ( 2.0 * tau_hf_HCN2 ( v_val ) - DT ) * ion [ hf_HCN2_go ] [ id ] ) / ( 2.0 * tau_hf_HCN2 ( v_val ) + DT );
ion [ hs_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN1 ( v_val ) + ( 2.0 * tau_hs_HCN1 ( v_val ) - DT ) * ion [ hs_HCN1_go ] [ id ] ) / ( 2.0 * tau_hs_HCN1 ( v_val ) + DT );
ion [ hs_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN2 ( v_val ) + ( 2.0 * tau_hs_HCN2 ( v_val ) - DT ) * ion [ hs_HCN2_go ] [ id ] ) / ( 2.0 * tau_hs_HCN2 ( v_val ) + DT );
d_go_solve -> vec [ cn_v_old ] [ id ] = elem [ v ] [ id ] ;
}
}
__global__ void go_update_ion_RKC_exp_imp ( neuron_t *d_go, neuron_solve_t *d_go_solve, double *vnew, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
double v_val = ( elem [ v ] [ id ] + vnew [ id ] ) / 2.0;
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( elem [ v ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( elem [ v ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
Ca_val = ( Ca_val + elem [ Ca ] [ id ] ) / 2.0;
ion [ m_NaT_go ] [ id ] = ( 2.0 * DT * inf_m_NaT ( v_val ) + ( 2.0 * tau_m_NaT ( v_val ) - DT ) * ion [ m_NaT_go ] [ id ] ) / ( 2.0 * tau_m_NaT ( v_val ) + DT );
ion [ h_NaT_go ] [ id ] = ( 2.0 * DT * inf_h_NaT ( v_val ) + ( 2.0 * tau_h_NaT ( v_val ) - DT ) * ion [ h_NaT_go ] [ id ] ) / ( 2.0 * tau_h_NaT ( v_val ) + DT );
ion [ r_NaR_go ] [ id ] = ( 2.0 * DT * inf_r_NaR ( v_val ) + ( 2.0 * tau_r_NaR ( v_val ) - DT ) * ion [ r_NaR_go ] [ id ] ) / ( 2.0 * tau_r_NaR ( v_val ) + DT );
ion [ s_NaR_go ] [ id ] = ( 2.0 * DT * inf_s_NaR ( v_val ) + ( 2.0 * tau_s_NaR ( v_val ) - DT ) * ion [ s_NaR_go ] [ id ] ) / ( 2.0 * tau_s_NaR ( v_val ) + DT );
ion [ p_NaP_go ] [ id ] = ( 2.0 * DT * inf_p_NaP ( v_val ) + ( 2.0 * tau_p_NaP ( v_val ) - DT ) * ion [ p_NaP_go ] [ id ] ) / ( 2.0 * tau_p_NaP ( v_val ) + DT );
ion [ n_KV_go ] [ id ] = ( 2.0 * DT * inf_n_KV ( v_val ) + ( 2.0 * tau_n_KV ( v_val ) - DT ) * ion [ n_KV_go ] [ id ] ) / ( 2.0 * tau_n_KV ( v_val ) + DT );
ion [ a_KA_go ] [ id ] = ( 2.0 * DT * inf_a_KA ( v_val ) + ( 2.0 * tau_a_KA ( v_val ) - DT ) * ion [ a_KA_go ] [ id ] ) / ( 2.0 * tau_a_KA ( v_val ) + DT );
ion [ b_KA_go ] [ id ] = ( 2.0 * DT * inf_b_KA ( v_val ) + ( 2.0 * tau_b_KA ( v_val ) - DT ) * ion [ b_KA_go ] [ id ] ) / ( 2.0 * tau_b_KA ( v_val ) + DT );
ion [ sl_Kslow_go ] [ id ] = ( 2.0 * DT * inf_sl_Kslow ( v_val ) + ( 2.0 * tau_sl_Kslow ( v_val ) - DT ) * ion [ sl_Kslow_go ] [ id ] ) / ( 2.0 * tau_sl_Kslow ( v_val ) + DT );
ion [ c_KC_go ] [ id ] = ( 2.0 * DT * inf_c_KC ( v_val, Ca_val ) + ( 2.0 * tau_c_KC ( v_val, Ca_val ) - DT ) * ion [ c_KC_go ] [ id ] ) / ( 2.0 * tau_c_KC ( v_val, Ca_val ) + DT );
ion [ ch_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ch_CaHVA ( v_val ) + ( 2.0 * tau_ch_CaHVA ( v_val ) - DT ) * ion [ ch_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ch_CaHVA ( v_val ) + DT );
ion [ ci_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ci_CaHVA ( v_val ) + ( 2.0 * tau_ci_CaHVA ( v_val ) - DT ) * ion [ ci_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ci_CaHVA ( v_val ) + DT );
ion [ cl_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cl_CaLVA ( v_val ) + ( 2.0 * tau_cl_CaLVA ( v_val ) - DT ) * ion [ cl_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cl_CaLVA ( v_val ) + DT );
ion [ cm_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cm_CaLVA ( v_val ) + ( 2.0 * tau_cm_CaLVA ( v_val ) - DT ) * ion [ cm_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cm_CaLVA ( v_val ) + DT );
ion [ hf_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN1 ( v_val ) + ( 2.0 * tau_hf_HCN1 ( v_val ) - DT ) * ion [ hf_HCN1_go ] [ id ] ) / ( 2.0 * tau_hf_HCN1 ( v_val ) + DT );
ion [ hf_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN2 ( v_val ) + ( 2.0 * tau_hf_HCN2 ( v_val ) - DT ) * ion [ hf_HCN2_go ] [ id ] ) / ( 2.0 * tau_hf_HCN2 ( v_val ) + DT );
ion [ hs_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN1 ( v_val ) + ( 2.0 * tau_hs_HCN1 ( v_val ) - DT ) * ion [ hs_HCN1_go ] [ id ] ) / ( 2.0 * tau_hs_HCN1 ( v_val ) + DT );
ion [ hs_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN2 ( v_val ) + ( 2.0 * tau_hs_HCN2 ( v_val ) - DT ) * ion [ hs_HCN2_go ] [ id ] ) / ( 2.0 * tau_hs_HCN2 ( v_val ) + DT );
elem [ v ] [ id ] = vnew [ id ];
}
}
__global__ void go_update_ion ( neuron_t *d_go, neuron_solve_t *d_go_solve, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
double v_val = elem [ v ] [ id ];
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( elem [ v ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( elem [ v ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
ion [ m_NaT_go ] [ id ] = inf_m_NaT ( v_val ) + ( ion [ m_NaT_go ] [ id ] - inf_m_NaT ( v_val ) ) * exp ( -DT / tau_m_NaT ( v_val ) );
ion [ h_NaT_go ] [ id ] = inf_h_NaT ( v_val ) + ( ion [ h_NaT_go ] [ id ] - inf_h_NaT ( v_val ) ) * exp ( -DT / tau_h_NaT ( v_val ) );
ion [ r_NaR_go ] [ id ] = inf_r_NaR ( v_val ) + ( ion [ r_NaR_go ] [ id ] - inf_r_NaR ( v_val ) ) * exp ( -DT / tau_r_NaR ( v_val ) );
ion [ s_NaR_go ] [ id ] = inf_s_NaR ( v_val ) + ( ion [ s_NaR_go ] [ id ] - inf_s_NaR ( v_val ) ) * exp ( -DT / tau_s_NaR ( v_val ) );
ion [ p_NaP_go ] [ id ] = inf_p_NaP ( v_val ) + ( ion [ p_NaP_go ] [ id ] - inf_p_NaP ( v_val ) ) * exp ( -DT / tau_p_NaP ( v_val ) );
ion [ n_KV_go ] [ id ] = inf_n_KV ( v_val ) + ( ion [ n_KV_go ] [ id ] - inf_n_KV ( v_val ) ) * exp ( -DT / tau_n_KV ( v_val ) );
ion [ a_KA_go ] [ id ] = inf_a_KA ( v_val ) + ( ion [ a_KA_go ] [ id ] - inf_a_KA ( v_val ) ) * exp ( -DT / tau_a_KA ( v_val ) );
ion [ b_KA_go ] [ id ] = inf_b_KA ( v_val ) + ( ion [ b_KA_go ] [ id ] - inf_b_KA ( v_val ) ) * exp ( -DT / tau_b_KA ( v_val ) );
ion [ c_KC_go ] [ id ] = inf_c_KC ( v_val , Ca_val )
+ (ion [ c_KC_go ] [ id ] - inf_c_KC ( v_val , Ca_val ) ) * exp ( -DT / tau_c_KC ( v_val , Ca_val ) );
ion [ sl_Kslow_go ] [ id ] = inf_sl_Kslow ( v_val ) + ( ion [ sl_Kslow_go ] [ id ] - inf_sl_Kslow ( v_val ) ) * exp ( -DT / tau_sl_Kslow ( v_val ) );
ion [ ch_CaHVA_go ] [ id ] = inf_ch_CaHVA( v_val ) + ( ion [ ch_CaHVA_go ] [ id ] - inf_ch_CaHVA ( v_val ) ) * exp ( -DT / tau_ch_CaHVA( v_val ) );
ion [ ci_CaHVA_go ] [ id ] = inf_ci_CaHVA( v_val ) + ( ion [ ci_CaHVA_go ] [ id ] - inf_ci_CaHVA ( v_val ) ) * exp ( -DT / tau_ci_CaHVA( v_val ) );
ion [ cl_CaLVA_go ] [ id ] = inf_cl_CaLVA( v_val ) + ( ion [ cl_CaLVA_go ] [ id ] - inf_cl_CaLVA ( v_val ) ) * exp ( -DT / tau_cl_CaLVA( v_val ) );
ion [ cm_CaLVA_go ] [ id ] = inf_cm_CaLVA( v_val ) + ( ion [ cm_CaLVA_go ] [ id ] - inf_cm_CaLVA ( v_val ) ) * exp ( -DT / tau_cm_CaLVA( v_val ) );
ion [ hf_HCN1_go ] [ id ] = inf_hf_HCN1( v_val ) + ( ion [ hf_HCN1_go ] [ id ] - inf_hf_HCN1 ( v_val ) ) * exp ( -DT / tau_hf_HCN1( v_val ) );
ion [ hf_HCN2_go ] [ id ] = inf_hf_HCN2( v_val ) + ( ion [ hf_HCN2_go ] [ id ] - inf_hf_HCN2 ( v_val ) ) * exp ( -DT / tau_hf_HCN2( v_val ) );
ion [ hs_HCN1_go ] [ id ] = inf_hs_HCN1( v_val ) + ( ion [ hs_HCN1_go ] [ id ] - inf_hs_HCN1 ( v_val ) ) * exp ( -DT / tau_hs_HCN1( v_val ) );
ion [ hs_HCN2_go ] [ id ] = inf_hs_HCN2( v_val ) + ( ion [ hs_HCN2_go ] [ id ] - inf_hs_HCN2 ( v_val ) ) * exp ( -DT / tau_hs_HCN2( v_val ) );
// integral
//elem [ Ca ] [ id ] = ( DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) + B_Ca1_GO * Ca1_0_GO) + Ca_val ) / (1.0 + DT * B_Ca1_GO);
// Euler
//double dCa = - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//elem [ Ca ] [ id ] += dCa * DT;
// RK4
double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
//double Cinf = 5e-5 - I_Ca2 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//go -> ca2 [ id ] = Cinf - (Cinf - Ca2_val) * exp ( - DT * 1.3);
// Ca2 Euler
//double dCa2 = - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//d_go -> ca2 [ id ] += dCa2 * DT;
// Ca2 RK4
k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
//double Cinf = 5e-5 - I_Ca1 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//elem [ Ca ] [ id ] = Cinf - (Cinf - Ca_val) * exp ( - DT * 1.3);
}
}
__global__ void go_update_ion_RKC ( neuron_t *d_go, neuron_solve_t *d_go_solve, double *elem_v, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
//double v_val = elem [ v ] [ id ];
double v_val = elem_v [ id ];
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( elem [ v ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( elem [ v ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
ion [ m_NaT_go ] [ id ] = inf_m_NaT ( v_val ) + ( ion [ m_NaT_go ] [ id ] - inf_m_NaT ( v_val ) ) * exp ( -DT / tau_m_NaT ( v_val ) );
ion [ h_NaT_go ] [ id ] = inf_h_NaT ( v_val ) + ( ion [ h_NaT_go ] [ id ] - inf_h_NaT ( v_val ) ) * exp ( -DT / tau_h_NaT ( v_val ) );
ion [ r_NaR_go ] [ id ] = inf_r_NaR ( v_val ) + ( ion [ r_NaR_go ] [ id ] - inf_r_NaR ( v_val ) ) * exp ( -DT / tau_r_NaR ( v_val ) );
ion [ s_NaR_go ] [ id ] = inf_s_NaR ( v_val ) + ( ion [ s_NaR_go ] [ id ] - inf_s_NaR ( v_val ) ) * exp ( -DT / tau_s_NaR ( v_val ) );
ion [ p_NaP_go ] [ id ] = inf_p_NaP ( v_val ) + ( ion [ p_NaP_go ] [ id ] - inf_p_NaP ( v_val ) ) * exp ( -DT / tau_p_NaP ( v_val ) );
ion [ n_KV_go ] [ id ] = inf_n_KV ( v_val ) + ( ion [ n_KV_go ] [ id ] - inf_n_KV ( v_val ) ) * exp ( -DT / tau_n_KV ( v_val ) );
ion [ a_KA_go ] [ id ] = inf_a_KA ( v_val ) + ( ion [ a_KA_go ] [ id ] - inf_a_KA ( v_val ) ) * exp ( -DT / tau_a_KA ( v_val ) );
ion [ b_KA_go ] [ id ] = inf_b_KA ( v_val ) + ( ion [ b_KA_go ] [ id ] - inf_b_KA ( v_val ) ) * exp ( -DT / tau_b_KA ( v_val ) );
ion [ c_KC_go ] [ id ] = inf_c_KC ( v_val , Ca_val )
+ (ion [ c_KC_go ] [ id ] - inf_c_KC ( v_val , Ca_val ) ) * exp ( -DT / tau_c_KC ( v_val , Ca_val ) );
ion [ sl_Kslow_go ] [ id ] = inf_sl_Kslow ( v_val ) + ( ion [ sl_Kslow_go ] [ id ] - inf_sl_Kslow ( v_val ) ) * exp ( -DT / tau_sl_Kslow ( v_val ) );
ion [ ch_CaHVA_go ] [ id ] = inf_ch_CaHVA( v_val ) + ( ion [ ch_CaHVA_go ] [ id ] - inf_ch_CaHVA ( v_val ) ) * exp ( -DT / tau_ch_CaHVA( v_val ) );
ion [ ci_CaHVA_go ] [ id ] = inf_ci_CaHVA( v_val ) + ( ion [ ci_CaHVA_go ] [ id ] - inf_ci_CaHVA ( v_val ) ) * exp ( -DT / tau_ci_CaHVA( v_val ) );
ion [ cl_CaLVA_go ] [ id ] = inf_cl_CaLVA( v_val ) + ( ion [ cl_CaLVA_go ] [ id ] - inf_cl_CaLVA ( v_val ) ) * exp ( -DT / tau_cl_CaLVA( v_val ) );
ion [ cm_CaLVA_go ] [ id ] = inf_cm_CaLVA( v_val ) + ( ion [ cm_CaLVA_go ] [ id ] - inf_cm_CaLVA ( v_val ) ) * exp ( -DT / tau_cm_CaLVA( v_val ) );
ion [ hf_HCN1_go ] [ id ] = inf_hf_HCN1( v_val ) + ( ion [ hf_HCN1_go ] [ id ] - inf_hf_HCN1 ( v_val ) ) * exp ( -DT / tau_hf_HCN1( v_val ) );
ion [ hf_HCN2_go ] [ id ] = inf_hf_HCN2( v_val ) + ( ion [ hf_HCN2_go ] [ id ] - inf_hf_HCN2 ( v_val ) ) * exp ( -DT / tau_hf_HCN2( v_val ) );
ion [ hs_HCN1_go ] [ id ] = inf_hs_HCN1( v_val ) + ( ion [ hs_HCN1_go ] [ id ] - inf_hs_HCN1 ( v_val ) ) * exp ( -DT / tau_hs_HCN1( v_val ) );
ion [ hs_HCN2_go ] [ id ] = inf_hs_HCN2( v_val ) + ( ion [ hs_HCN2_go ] [ id ] - inf_hs_HCN2 ( v_val ) ) * exp ( -DT / tau_hs_HCN2( v_val ) );
// integral
//elem [ Ca ] [ id ] = ( DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) + B_Ca1_GO * Ca1_0_GO) + Ca_val ) / (1.0 + DT * B_Ca1_GO);
// Euler
double dCa = - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
elem [ Ca ] [ id ] += dCa * DT;
// RK4
//double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
//double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
//double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
//elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
//double Cinf = 5e-5 - I_Ca2 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//go -> ca2 [ id ] = Cinf - (Cinf - Ca2_val) * exp ( - DT * 1.3);
// Ca2 Euler
double dCa2 = - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
d_go -> ca2 [ id ] += dCa2 * DT;
// Ca2 RK4
//k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
//k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
//k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
//d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
//double Cinf = 5e-5 - I_Ca1 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//elem [ Ca ] [ id ] = Cinf - (Cinf - Ca_val) * exp ( - DT * 1.3);
}
}
__host__ void go_initialize_ion ( neuron_t *go )
{
double **elem = go -> elem;
double **ion = go -> ion;
double init_v_rand = 0.0;
for ( int i = 0; i < go -> nc; i++) {
if ( i % GO_COMP == 0 )
init_v_rand = ( ( double ) rand ( ) / RAND_MAX ) - 0.5;
elem [ v ] [ i ] = V_INIT_GO + 2.0 * init_v_rand;
elem [ Ca ] [ i ] = Ca1_0_GO;
go -> ca2 [ i ] = Ca1_0_GO;
go -> rev_ca2 [ i ] = V_Ca_GO;
go -> ca_old [ i ] = Ca1_0_GO;
elem [ i_ext ] [ i ] = 0.0;
double v_val = elem [ v ] [ i ];
double ca_val = elem [ Ca ] [ i ];
ion [ m_NaT_go ] [ i ] = inf_m_NaT ( v_val );
ion [ h_NaT_go ] [ i ] = inf_h_NaT ( v_val );
ion [ r_NaR_go ] [ i ] = inf_r_NaR ( v_val );
ion [ s_NaR_go ] [ i ] = inf_s_NaR ( v_val );
ion [ p_NaP_go ] [ i ] = inf_p_NaP ( v_val );
ion [ n_KV_go ] [ i ] = inf_n_KV ( v_val );
ion [ a_KA_go ] [ i ] = inf_a_KA ( v_val );
ion [ b_KA_go ] [ i ] = inf_b_KA ( v_val );
ion [ c_KC_go ] [ i ] = inf_c_KC ( v_val, ca_val );
ion [ sl_Kslow_go ] [ i ] = inf_sl_Kslow ( v_val );
ion [ ch_CaHVA_go ] [ i ] = inf_ch_CaHVA ( v_val );
ion [ ci_CaHVA_go ] [ i ] = inf_ci_CaHVA ( v_val );
ion [ cl_CaLVA_go ] [ i ] = inf_cl_CaLVA ( v_val );
ion [ cm_CaLVA_go ] [ i ] = inf_cm_CaLVA ( v_val );
ion [ hf_HCN1_go ] [ i ] = inf_hf_HCN1 ( v_val );
ion [ hf_HCN2_go ] [ i ] = inf_hf_HCN2 ( v_val );
ion [ hs_HCN1_go ] [ i ] = inf_hs_HCN1 ( v_val );
ion [ hs_HCN2_go ] [ i ] = inf_hs_HCN2 ( v_val );
ion [ o1_KAHP_go ] [ i ] = 0.0;
ion [ o2_KAHP_go ] [ i ] = 0.0;
ion [ c2_KAHP_go ] [ i ] = 0.0;
ion [ c3_KAHP_go ] [ i ] = 0.0;
ion [ c4_KAHP_go ] [ i ] = 0.0;
ion [ c1_KAHP_go ] [ i ] = 1.0;
}
}
|
be92154f5fe38e1c34c51ac40760417351ab7208.cu
|
#include "go_ion.cuh"
//Na channel
__host__ __device__ static double alpha_m_NaT(const double v) { return 0.417*(v + 25.0) / (1 - exp(-(v + 25.0) / 10.0)); }
__host__ __device__ static double beta_m_NaT(const double v) { return 16.68*exp(-0.055*(v + 50.0)); }
__host__ __device__ static double inf_m_NaT(const double v) { return alpha_m_NaT(v) / (alpha_m_NaT(v) + beta_m_NaT(v)); }
__host__ __device__ static double tau_m_NaT(const double v) { return 1.0 / (alpha_m_NaT(v) + beta_m_NaT(v)); }
__host__ __device__ static double alpha_h_NaT(const double v) { return 0.292*exp(-0.3*(v + 50.0)); }
__host__ __device__ static double beta_h_NaT(const double v) { return 4.17 / (1 + exp(-(v + 17.0) / 5.0)); }
__host__ __device__ static double inf_h_NaT(const double v) { return alpha_h_NaT(v) / (alpha_h_NaT(v) + beta_h_NaT(v)); }
__host__ __device__ static double tau_h_NaT(const double v) { return 1.0 / (alpha_h_NaT(v) + beta_h_NaT(v)); }
__host__ __device__ static double alpha_r_NaR(const double v) { return ((1.11 - 68.5*(v - 4.48) / (exp(-(v - 4.48) / 6.8) - 1.0))*1.0e-4); }
__host__ __device__ static double beta_r_NaR(const double v) { double x = (v + 44.0) / 0.11; if (x > 200.0)x = 200.0; return ((66.0 + 21.7*(v + 44) / (exp(x) - 1.0))*1.0e-3); }
__host__ __device__ static double inf_r_NaR(const double v) { return alpha_r_NaR(v) / (alpha_r_NaR(v) + beta_r_NaR(v)); }
__host__ __device__ static double tau_r_NaR(const double v) { return 1.0 / (alpha_r_NaR(v) + beta_r_NaR(v)); }
__host__ __device__ static double alpha_s_NaR(const double v) { return (0.443*exp(-(v + 80.0) / 62.5)); }
__host__ __device__ static double beta_s_NaR(const double v) { return (0.014*exp((v + 83.3) / 16.1)); }
__host__ __device__ static double inf_s_NaR(const double v) { return alpha_s_NaR(v) / (alpha_s_NaR(v) + beta_s_NaR(v)); }
__host__ __device__ static double tau_s_NaR(const double v) { return 1.0 / (alpha_s_NaR(v) + beta_s_NaR(v)); }
__host__ __device__ static double alpha_p_NaP(const double v) { return (0.421*(v + 40.0) / (1.0 - exp(-(v + 40.0) / 5.0))); }
__host__ __device__ static double beta_p_NaP(const double v) { return (-0.287*(v + 40.0) / (1.0 - exp((v + 40.0) / 5.0))); }
__host__ __device__ static double inf_p_NaP(const double v) { return (1 / (1.0 + exp(-(v + 43.0) / 5.0))); }
__host__ __device__ static double tau_p_NaP(const double v) { return (5.0 / (alpha_p_NaP(v) + beta_p_NaP(v))); }
//Ca channel
__host__ __device__ static double alpha_ch_CaHVA(const double v) { return (0.0687 * exp(0.063*(v + 29.0))); }
__host__ __device__ static double beta_ch_CaHVA(const double v) { return (0.115 * exp(-0.039*(v + 18.66))); }
__host__ __device__ static double inf_ch_CaHVA(const double v) { return (alpha_ch_CaHVA(v) / (alpha_ch_CaHVA(v) + beta_ch_CaHVA(v))); }
__host__ __device__ static double tau_ch_CaHVA(const double v) { return (1.0 / (alpha_ch_CaHVA(v) + beta_ch_CaHVA(v))); }
__host__ __device__ static double alpha_ci_CaHVA(const double v) { return (1.8e-3*exp(-(v + 48.0) / 18.0)); }
__host__ __device__ static double beta_ci_CaHVA(const double v) { return (1.8e-3*exp((v + 48.0) / 83.0)); }
__host__ __device__ static double inf_ci_CaHVA(const double v) { return (alpha_ci_CaHVA(v) / (alpha_ci_CaHVA(v) + beta_ci_CaHVA(v))); }
__host__ __device__ static double tau_ci_CaHVA(const double v) { return (1.0 / (alpha_ci_CaHVA(v) + beta_ci_CaHVA(v))); }
__host__ __device__ static double inf_cl_CaLVA(const double v) { return (1.0 / (1.0 + exp(-(v + 52.0) / 7.4))); }
__host__ __device__ static double tau_cl_CaLVA(const double v) { return ((3.0 + 1.0 / (exp((v + 27.0) / 10.0) + exp(-(v + 102.0) / 15.0))) / 0.85); }
__host__ __device__ static double inf_cm_CaLVA(const double v) { return (1.0 / (1.0 + exp((v + 80.0) / 5.0))); }
__host__ __device__ static double tau_cm_CaLVA(const double v) { return ((85.0 + 1.0 / (exp((v + 48.0) / 4.0) + exp(-(v + 407.0) / 50.0))) / 0.9); }
//K channel
__host__ __device__ static double alpha_n_KV(const double v) { return 0.062*(v + 26.0) / (1.0 - exp(-(v + 26.0) / 10.0)); }
__host__ __device__ static double beta_n_KV(const double v) { return 0.78*exp(-(v + 36.0) / 80.0); }
__host__ __device__ static double inf_n_KV(const double v) { return (alpha_n_KV(v) / (alpha_n_KV(v) + beta_n_KV(v))); }
__host__ __device__ static double tau_n_KV(const double v) { return (1.0 / (alpha_n_KV(v) + beta_n_KV(v))); }
__host__ __device__ static double alpha_a_KA(const double v) { return (0.62 / (1.0 + exp(-(v + 9.17) / 23.32))); }
__host__ __device__ static double beta_a_KA(const double v) { return (0.126 / (exp((v + 18.28) / 19.47))); }
__host__ __device__ static double inf_a_KA(const double v) { return (1.0 / (1.0 + exp(-(v + 38.0) / 17.0))); }
__host__ __device__ static double tau_a_KA(const double v) { return (1.0 / (alpha_a_KA(v) + beta_a_KA(v))); }
__host__ __device__ static double alpha_b_KA(const double v) { return (0.028 / (1.0 + exp((v + 111.0) / 12.84))); }
__host__ __device__ static double beta_b_KA(const double v) { return(0.026 / (1.0 + exp(-(v + 49.95) / 8.9))); }
__host__ __device__ static double inf_b_KA(const double v) { return (1.0 / (1.0 + exp((v + 78.8) / 8.4))); }
__host__ __device__ static double tau_b_KA(const double v) { return (1.0 / (alpha_b_KA(v) + beta_b_KA(v))); }
__host__ __device__ static double alpha_c_KC(const double v, const double ca) { return (3.2 / (1.0 + 0.0015*exp(-(v) / 11.7) / ca)); }
__host__ __device__ static double beta_c_KC(const double v, const double ca) { return (0.46 / (1.0 + ca / (1.5e-4*exp(-(v) / 11.7)))); }
__host__ __device__ static double inf_c_KC(const double v, const double ca) { return (alpha_c_KC(v, ca) / (alpha_c_KC(v, ca) + beta_c_KC(v, ca))); }
__host__ __device__ static double tau_c_KC(const double v, const double ca) { return (1.0 / (alpha_c_KC(v, ca) + beta_c_KC(v, ca))); }
__host__ __device__ static double alpha_sl_Kslow(const double v) { return (0.0037*exp((v + 30.0) / 40.0)); }
__host__ __device__ static double beta_sl_Kslow(const double v) { return (0.0037*exp(-(v + 30.0) / 20.0)); }
__host__ __device__ static double inf_sl_Kslow(const double v) { return (1.0 / (1.0 + exp(-(v + 35.0) / 6.0))); }
__host__ __device__ static double tau_sl_Kslow(const double v) { return (1.0 / (alpha_sl_Kslow(v) + beta_sl_Kslow(v))); }
__host__ __device__ static double r_HCN1(const double v) { return (0.0021 * (v) + 0.97); }
__host__ __device__ static double inf_hf_HCN1(const double v) { return (r_HCN1(v) * (1.0 / (1.0 + exp((v + 72.5)*0.11)))); }
__host__ __device__ static double inf_hs_HCN1(const double v) { return ((1.0 - r_HCN1(v)) * (1.0 / (1.0 + exp((v + 72.5)*0.11)))); }
__host__ __device__ static double tau_hf_HCN1(const double v) { return (exp((0.0137*v + 3.37)*2.3)); }
__host__ __device__ static double tau_hs_HCN1(const double v) { return (exp((0.0145*v + 4.06)*2.3)); }
__host__ __device__ static double r_HCN2(const double v) {
//return (-0.0227 * (v + 10.0) - 1.47);
if (v >= -64.70)return 0.0;
else if (v <= -108.70) return 1.0;
else return (-0.0227 * (v) - 1.47);
}
__host__ __device__ static double inf_hf_HCN2(const double v) { return (r_HCN2(v) * (1.0 / (1.0 + exp((v + 81.9)*0.16)))); }
__host__ __device__ static double inf_hs_HCN2(const double v) { return ((1.0 - r_HCN2(v)) * (1.0 / (1.0 + exp((v + 81.9)*0.16)))); }
__host__ __device__ static double tau_hf_HCN2(const double v) { return (exp((0.027*v + 5.6)*2.3)); }
__host__ __device__ static double tau_hs_HCN2(const double v) { return (exp((0.015*v + 5.3)*2.3)); }
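// Second-order (trapezoidal / Crank-Nicolson) update of the six-state, Ca2+-dependent KAHP gating scheme:
// the right-hand side is built from the rate matrix at the previous Ca concentration (ca_old), and the
// system with the rate matrix at the current Ca is solved per compartment by Gaussian elimination with
// partial pivoting; ca_old is refreshed for the next step.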
__global__
void go_KAHP_update_2order ( const int n, const double *ca, double *ca_old,
double *o1, double *o2, double *c1, double *c2, double *c3, double *c4, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double i1, i2, i3, a1, a2, b1, b2;
i1 = 80e-3;
i2 = 80e-3;
i3 = 200e-3;
a1 = 1.0;
a2 = 100e-3;
b1 = 160e-3;
b2 = 1.2;
//double k1, k2, k3, k4;
//for (int i = 0; i < n * GO_COMP; i++) {
if ( id < n * GO_COMP )
{
double l_ca = ca_old [ id ];
ca_old [ id ] = ca [ id ];
double l_dt = 2.0 / DT;
double d1 = 200 * l_ca / 3.0;
double d2 = 160 * l_ca / 3.0;
double d3 = 80 * l_ca / 3.0;
double vca_n [ 6 ] [ 6 ] = {
{ -d1 + l_dt, i1, 0.0, 0.0, 0.0, 0.0 },
{ d1, -d2 - i1 + l_dt, i2, 0.0, 0.0, 0.0 },
{ 0.0, d2, -(d3 + i2 + b1) + l_dt, i3, a1, 0.0 },
{ 0.0, 0.0, d3, -(i3 + b2) + l_dt, 0.0, a2 },
{ 0.0, 0.0, b1, 0.0, -a1 + l_dt, 0.0 },
{ 0.0, 0.0, 0.0, b2, 0.0, -a2 + l_dt }
};
double vc [ 6 ] = { c1 [ id ], c2 [ id ], c3 [ id ], c4 [ id ], o1 [ id ], o2 [ id ] };
double vcb [ 6 ] = { };
for ( int i = 0; i < 6; i++ )
{
for ( int j = 0; j < 6; j++ )
{
vcb [ i ] -= vc [ j ] * vca_n [ i ] [ j ];
}
}
l_ca = ca [ id ];
d1 = 200 * l_ca / 3.0;
d2 = 160 * l_ca / 3.0;
d3 = 80 * l_ca / 3.0;
double vca [ 6 ] [ 6 ] = {
{ -d1 - l_dt, i1, 0.0, 0.0, 0.0, 0.0 },
{ d1, -d2 - i1 - l_dt, i2, 0.0, 0.0, 0.0 },
{ 0.0, d2, -(d3 + i2 + b1) - l_dt, i3, a1, 0.0 },
{ 0.0, 0.0, d3, -(i3 + b2) - l_dt, 0.0, a2 },
{ 0.0, 0.0, b1, 0.0, -a1 - l_dt, 0.0 },
{ 0.0, 0.0, 0.0, b2, 0.0, -a2 - l_dt }
};
//////////////////// Pivot selection //////////////////
double temp;
for ( int i = 0; i < 5; i++ )
{
int pivot = i;
double p_max = fabs ( vca [ i ] [ i ] );
for ( int j = i + 1; j < 6; j++ )
{
if ( fabs ( vca [ j ] [ i ] ) > p_max )
{
pivot = j;
p_max = fabs ( vca [ j ] [ i ] );
}
}
// minimum pivot error
if ( fabs ( p_max ) < 1.0e-12 ) { printf ( "pivot error\n" ); return; }
if ( pivot != i ) // pivot exchange
{
for ( int j = i; j < 6; j++ )
{
temp = vca [ i ] [ j ];
vca [ i ] [ j ] = vca [ pivot ] [ j ];
vca [ pivot ] [ j ] = temp;
}
temp = vcb [ i ];
vcb [ i ] = vcb [ pivot ];
vcb [ pivot ] = temp;
}
//////////////////// Forward elimination //////////////////
for ( int j = i + 1; j < 6; j++ )
{
double w = vca [ j ] [ i ] / vca [ i ] [ i ];
vca [ j ] [ i ] = 0.0;
// Multiply the ith line by -a[j][i]/a[i][i] and add it to the jth line
for ( int k = i + 1; k < 6; k++ )
{
vca [ j ] [ k ] = vca [ j ] [ k ] - vca [ i ] [ k ] * w;
}
vcb [ j ] = vcb [ j ] - vcb [ i ] * w;
}
}
//////////////////// Back substitution //////////////////
for ( int i = 6 - 1; i >= 0; i-- )
{
for( int j = i + 1; j < 6; j++)
{
vcb [ i ] = vcb [ i ] - vca [ i ] [ j ] * vcb [ j ];
vca [ i ] [ j ] = 0.0;
}
vcb [ i ] = vcb [ i ] / vca [ i ] [ i ];
vca [ i ] [ i ] = 1.0;
}
c1[id] = vcb[0];
c2[id] = vcb[1];
c3[id] = vcb[2];
c4[id] = vcb[3];
o1[id] = vcb[4];
o2[id] = vcb[5];
if ((o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] < 0.9999)
|| (o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] > 1.0001)) {
printf("KAHP error %.15f\n", o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id]); //break;
}
}
}
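// First-order implicit (backward-Euler) variant of the KAHP update: forms ( I - DT * A(Ca) ) and solves it
// for the new state vector with the same partial-pivoting Gaussian elimination as above.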
__global__
void go_KAHP_update ( const int n, const double *ca, double *o1, double *o2, double *c1, double *c2, double *c3, double *c4, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double i1, i2, i3, a1, a2, b1, b2;
i1 = 80e-3;
i2 = 80e-3;
i3 = 200e-3;
a1 = 1.0;
a2 = 100e-3;
b1 = 160e-3;
b2 = 1.2;
//double k1, k2, k3, k4;
//for (int i = 0; i < n * GO_COMP; i++) {
if ( id < n * GO_COMP )
{
double d1 = 200 * ca [ id ] / 3.0;
double d2 = 160 * ca [ id ] / 3.0;
double d3 = 80 * ca [ id ] / 3.0;
double vca [ 6 ] [ 6 ] = {
{ -d1, i1, 0.0, 0.0, 0.0, 0.0 },
{ d1, -d2 - i1, i2, 0.0, 0.0, 0.0 },
{ 0.0, d2, -(d3 + i2 + b1), i3, a1, 0.0 },
{ 0.0, 0.0, d3, -(i3 + b2), 0.0, a2 },
{ 0.0, 0.0, b1, 0.0, -a1, 0.0 },
{ 0.0, 0.0, 0.0, b2, 0.0, -a2 }
};
double vcb [ 6 ] = { c1 [ id ], c2 [ id ], c3 [ id ], c4 [ id ], o1 [ id ], o2 [ id ] };
for ( int i = 0; i < 6; i++ )
{
for ( int j = 0; j < 6; j++ ) { vca [ i ] [ j ] *= - DT; }
}
for ( int i = 0; i < 6; i++ ) { vca [ i ] [ i ] += 1.0; }
//////////////////// Pivot selection //////////////////
double temp;
for ( int i = 0; i < 5; i++ )
{
int pivot = i;
double p_max = fabs ( vca [ i ] [ i ] );
for ( int j = i + 1; j < 6; j++ )
{
if ( fabs ( vca [ j ] [ i ] ) > p_max )
{
pivot = j;
p_max = fabs ( vca [ j ] [ i ] );
}
}
// minimum pivot error
if ( fabs ( p_max ) < 1.0e-12 ) { printf ( "pivot error\n" ); return; }
if ( pivot != i ) // pivot exchange
{
for ( int j = i; j < 6; j++ )
{
temp = vca [ i ] [ j ];
vca [ i ] [ j ] = vca [ pivot ] [ j ];
vca [ pivot ] [ j ] = temp;
}
temp = vcb [ i ];
vcb [ i ] = vcb [ pivot ];
vcb [ pivot ] = temp;
}
//////////////////// Forward elimination //////////////////
for ( int j = i + 1; j < 6; j++ )
{
double w = vca [ j ] [ i ] / vca [ i ] [ i ];
vca [ j ] [ i ] = 0.0;
// Multiply the ith line by -a[j][i]/a[i][i] and add it to the jth line
for ( int k = i + 1; k < 6; k++ )
{
vca [ j ] [ k ] = vca [ j ] [ k ] - vca [ i ] [ k ] * w;
}
vcb [ j ] = vcb [ j ] - vcb [ i ] * w;
}
}
//////////////////// Back substitution //////////////////
for ( int i = 6 - 1; i >= 0; i-- )
{
for( int j = i + 1; j < 6; j++)
{
vcb [ i ] = vcb [ i ] - vca [ i ] [ j ] * vcb [ j ];
vca [ i ] [ j ] = 0.0;
}
vcb [ i ] = vcb [ i ] / vca [ i ] [ i ];
vca [ i ] [ i ] = 1.0;
}
c1[id] = vcb[0];
c2[id] = vcb[1];
c3[id] = vcb[2];
c4[id] = vcb[3];
o1[id] = vcb[4];
o2[id] = vcb[5];
if ((o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] < 0.9999)
|| (o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id] > 1.0001)) {
printf("KAHP error %.15f\n", o1[id] + o2[id] + c1[id] + c2[id] + c3[id] + c4[id]); //break;
}
}
}
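// Semi-implicit ionic update used with the Crank-Nicolson voltage step: each gate is advanced with the
// trapezoidal formula x_new = ( 2*DT*x_inf + ( 2*tau - DT )*x_old ) / ( 2*tau + DT ), evaluated at the
// midpoint voltage (average of the new V and the stored cn_v_old); both Ca pools are integrated with RK4
// (the Ca current is held fixed over the step), the CaLVA reversal potential is refreshed from the Nernst
// relation, and cn_v_old is overwritten with the new voltage at the end.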
__global__ void go_update_ion_exp_imp ( neuron_t *d_go, neuron_solve_t *d_go_solve, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
double v_val = ( elem [ v ] [ id ] + d_go_solve -> vec [ cn_v_old ] [ id ] ) / 2.0;
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( d_go_solve -> vec [ cn_v_old ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( d_go_solve -> vec [ cn_v_old ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
Ca_val = ( Ca_val + elem [ Ca ] [ id ] ) / 2.0;
ion [ m_NaT_go ] [ id ] = ( 2.0 * DT * inf_m_NaT ( v_val ) + ( 2.0 * tau_m_NaT ( v_val ) - DT ) * ion [ m_NaT_go ] [ id ] ) / ( 2.0 * tau_m_NaT ( v_val ) + DT );
ion [ h_NaT_go ] [ id ] = ( 2.0 * DT * inf_h_NaT ( v_val ) + ( 2.0 * tau_h_NaT ( v_val ) - DT ) * ion [ h_NaT_go ] [ id ] ) / ( 2.0 * tau_h_NaT ( v_val ) + DT );
ion [ r_NaR_go ] [ id ] = ( 2.0 * DT * inf_r_NaR ( v_val ) + ( 2.0 * tau_r_NaR ( v_val ) - DT ) * ion [ r_NaR_go ] [ id ] ) / ( 2.0 * tau_r_NaR ( v_val ) + DT );
ion [ s_NaR_go ] [ id ] = ( 2.0 * DT * inf_s_NaR ( v_val ) + ( 2.0 * tau_s_NaR ( v_val ) - DT ) * ion [ s_NaR_go ] [ id ] ) / ( 2.0 * tau_s_NaR ( v_val ) + DT );
ion [ p_NaP_go ] [ id ] = ( 2.0 * DT * inf_p_NaP ( v_val ) + ( 2.0 * tau_p_NaP ( v_val ) - DT ) * ion [ p_NaP_go ] [ id ] ) / ( 2.0 * tau_p_NaP ( v_val ) + DT );
ion [ n_KV_go ] [ id ] = ( 2.0 * DT * inf_n_KV ( v_val ) + ( 2.0 * tau_n_KV ( v_val ) - DT ) * ion [ n_KV_go ] [ id ] ) / ( 2.0 * tau_n_KV ( v_val ) + DT );
ion [ a_KA_go ] [ id ] = ( 2.0 * DT * inf_a_KA ( v_val ) + ( 2.0 * tau_a_KA ( v_val ) - DT ) * ion [ a_KA_go ] [ id ] ) / ( 2.0 * tau_a_KA ( v_val ) + DT );
ion [ b_KA_go ] [ id ] = ( 2.0 * DT * inf_b_KA ( v_val ) + ( 2.0 * tau_b_KA ( v_val ) - DT ) * ion [ b_KA_go ] [ id ] ) / ( 2.0 * tau_b_KA ( v_val ) + DT );
ion [ sl_Kslow_go ] [ id ] = ( 2.0 * DT * inf_sl_Kslow ( v_val ) + ( 2.0 * tau_sl_Kslow ( v_val ) - DT ) * ion [ sl_Kslow_go ] [ id ] ) / ( 2.0 * tau_sl_Kslow ( v_val ) + DT );
ion [ c_KC_go ] [ id ] = ( 2.0 * DT * inf_c_KC ( v_val, Ca_val ) + ( 2.0 * tau_c_KC ( v_val, Ca_val ) - DT ) * ion [ c_KC_go ] [ id ] ) / ( 2.0 * tau_c_KC ( v_val, Ca_val ) + DT );
ion [ ch_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ch_CaHVA ( v_val ) + ( 2.0 * tau_ch_CaHVA ( v_val ) - DT ) * ion [ ch_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ch_CaHVA ( v_val ) + DT );
ion [ ci_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ci_CaHVA ( v_val ) + ( 2.0 * tau_ci_CaHVA ( v_val ) - DT ) * ion [ ci_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ci_CaHVA ( v_val ) + DT );
ion [ cl_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cl_CaLVA ( v_val ) + ( 2.0 * tau_cl_CaLVA ( v_val ) - DT ) * ion [ cl_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cl_CaLVA ( v_val ) + DT );
ion [ cm_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cm_CaLVA ( v_val ) + ( 2.0 * tau_cm_CaLVA ( v_val ) - DT ) * ion [ cm_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cm_CaLVA ( v_val ) + DT );
ion [ hf_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN1 ( v_val ) + ( 2.0 * tau_hf_HCN1 ( v_val ) - DT ) * ion [ hf_HCN1_go ] [ id ] ) / ( 2.0 * tau_hf_HCN1 ( v_val ) + DT );
ion [ hf_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN2 ( v_val ) + ( 2.0 * tau_hf_HCN2 ( v_val ) - DT ) * ion [ hf_HCN2_go ] [ id ] ) / ( 2.0 * tau_hf_HCN2 ( v_val ) + DT );
ion [ hs_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN1 ( v_val ) + ( 2.0 * tau_hs_HCN1 ( v_val ) - DT ) * ion [ hs_HCN1_go ] [ id ] ) / ( 2.0 * tau_hs_HCN1 ( v_val ) + DT );
ion [ hs_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN2 ( v_val ) + ( 2.0 * tau_hs_HCN2 ( v_val ) - DT ) * ion [ hs_HCN2_go ] [ id ] ) / ( 2.0 * tau_hs_HCN2 ( v_val ) + DT );
d_go_solve -> vec [ cn_v_old ] [ id ] = elem [ v ] [ id ] ;
}
}
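// RKC variant of the semi-implicit update above: the midpoint voltage is formed from the solver-supplied
// vnew buffer instead of cn_v_old, and elem [ v ] is committed to vnew at the end of the kernel.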
__global__ void go_update_ion_RKC_exp_imp ( neuron_t *d_go, neuron_solve_t *d_go_solve, double *vnew, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
double v_val = ( elem [ v ] [ id ] + vnew [ id ] ) / 2.0;
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( elem [ v ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( elem [ v ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
Ca_val = ( Ca_val + elem [ Ca ] [ id ] ) / 2.0;
ion [ m_NaT_go ] [ id ] = ( 2.0 * DT * inf_m_NaT ( v_val ) + ( 2.0 * tau_m_NaT ( v_val ) - DT ) * ion [ m_NaT_go ] [ id ] ) / ( 2.0 * tau_m_NaT ( v_val ) + DT );
ion [ h_NaT_go ] [ id ] = ( 2.0 * DT * inf_h_NaT ( v_val ) + ( 2.0 * tau_h_NaT ( v_val ) - DT ) * ion [ h_NaT_go ] [ id ] ) / ( 2.0 * tau_h_NaT ( v_val ) + DT );
ion [ r_NaR_go ] [ id ] = ( 2.0 * DT * inf_r_NaR ( v_val ) + ( 2.0 * tau_r_NaR ( v_val ) - DT ) * ion [ r_NaR_go ] [ id ] ) / ( 2.0 * tau_r_NaR ( v_val ) + DT );
ion [ s_NaR_go ] [ id ] = ( 2.0 * DT * inf_s_NaR ( v_val ) + ( 2.0 * tau_s_NaR ( v_val ) - DT ) * ion [ s_NaR_go ] [ id ] ) / ( 2.0 * tau_s_NaR ( v_val ) + DT );
ion [ p_NaP_go ] [ id ] = ( 2.0 * DT * inf_p_NaP ( v_val ) + ( 2.0 * tau_p_NaP ( v_val ) - DT ) * ion [ p_NaP_go ] [ id ] ) / ( 2.0 * tau_p_NaP ( v_val ) + DT );
ion [ n_KV_go ] [ id ] = ( 2.0 * DT * inf_n_KV ( v_val ) + ( 2.0 * tau_n_KV ( v_val ) - DT ) * ion [ n_KV_go ] [ id ] ) / ( 2.0 * tau_n_KV ( v_val ) + DT );
ion [ a_KA_go ] [ id ] = ( 2.0 * DT * inf_a_KA ( v_val ) + ( 2.0 * tau_a_KA ( v_val ) - DT ) * ion [ a_KA_go ] [ id ] ) / ( 2.0 * tau_a_KA ( v_val ) + DT );
ion [ b_KA_go ] [ id ] = ( 2.0 * DT * inf_b_KA ( v_val ) + ( 2.0 * tau_b_KA ( v_val ) - DT ) * ion [ b_KA_go ] [ id ] ) / ( 2.0 * tau_b_KA ( v_val ) + DT );
ion [ sl_Kslow_go ] [ id ] = ( 2.0 * DT * inf_sl_Kslow ( v_val ) + ( 2.0 * tau_sl_Kslow ( v_val ) - DT ) * ion [ sl_Kslow_go ] [ id ] ) / ( 2.0 * tau_sl_Kslow ( v_val ) + DT );
ion [ c_KC_go ] [ id ] = ( 2.0 * DT * inf_c_KC ( v_val, Ca_val ) + ( 2.0 * tau_c_KC ( v_val, Ca_val ) - DT ) * ion [ c_KC_go ] [ id ] ) / ( 2.0 * tau_c_KC ( v_val, Ca_val ) + DT );
ion [ ch_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ch_CaHVA ( v_val ) + ( 2.0 * tau_ch_CaHVA ( v_val ) - DT ) * ion [ ch_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ch_CaHVA ( v_val ) + DT );
ion [ ci_CaHVA_go ] [ id ] = ( 2.0 * DT * inf_ci_CaHVA ( v_val ) + ( 2.0 * tau_ci_CaHVA ( v_val ) - DT ) * ion [ ci_CaHVA_go ] [ id ] ) / ( 2.0 * tau_ci_CaHVA ( v_val ) + DT );
ion [ cl_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cl_CaLVA ( v_val ) + ( 2.0 * tau_cl_CaLVA ( v_val ) - DT ) * ion [ cl_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cl_CaLVA ( v_val ) + DT );
ion [ cm_CaLVA_go ] [ id ] = ( 2.0 * DT * inf_cm_CaLVA ( v_val ) + ( 2.0 * tau_cm_CaLVA ( v_val ) - DT ) * ion [ cm_CaLVA_go ] [ id ] ) / ( 2.0 * tau_cm_CaLVA ( v_val ) + DT );
ion [ hf_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN1 ( v_val ) + ( 2.0 * tau_hf_HCN1 ( v_val ) - DT ) * ion [ hf_HCN1_go ] [ id ] ) / ( 2.0 * tau_hf_HCN1 ( v_val ) + DT );
ion [ hf_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hf_HCN2 ( v_val ) + ( 2.0 * tau_hf_HCN2 ( v_val ) - DT ) * ion [ hf_HCN2_go ] [ id ] ) / ( 2.0 * tau_hf_HCN2 ( v_val ) + DT );
ion [ hs_HCN1_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN1 ( v_val ) + ( 2.0 * tau_hs_HCN1 ( v_val ) - DT ) * ion [ hs_HCN1_go ] [ id ] ) / ( 2.0 * tau_hs_HCN1 ( v_val ) + DT );
ion [ hs_HCN2_go ] [ id ] = ( 2.0 * DT * inf_hs_HCN2 ( v_val ) + ( 2.0 * tau_hs_HCN2 ( v_val ) - DT ) * ion [ hs_HCN2_go ] [ id ] ) / ( 2.0 * tau_hs_HCN2 ( v_val ) + DT );
elem [ v ] [ id ] = vnew [ id ];
}
}
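// Explicit ionic update: every gate is advanced one DT with the exponential-Euler formula
// x_new = x_inf + ( x_old - x_inf ) * exp( -DT / tau ), and the two Ca pools are integrated with RK4;
// the commented-out blocks keep the implicit and forward-Euler alternatives for reference.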
__global__ void go_update_ion ( neuron_t *d_go, neuron_solve_t *d_go_solve, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
double v_val = elem [ v ] [ id ];
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( elem [ v ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( elem [ v ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
ion [ m_NaT_go ] [ id ] = inf_m_NaT ( v_val ) + ( ion [ m_NaT_go ] [ id ] - inf_m_NaT ( v_val ) ) * exp ( -DT / tau_m_NaT ( v_val ) );
ion [ h_NaT_go ] [ id ] = inf_h_NaT ( v_val ) + ( ion [ h_NaT_go ] [ id ] - inf_h_NaT ( v_val ) ) * exp ( -DT / tau_h_NaT ( v_val ) );
ion [ r_NaR_go ] [ id ] = inf_r_NaR ( v_val ) + ( ion [ r_NaR_go ] [ id ] - inf_r_NaR ( v_val ) ) * exp ( -DT / tau_r_NaR ( v_val ) );
ion [ s_NaR_go ] [ id ] = inf_s_NaR ( v_val ) + ( ion [ s_NaR_go ] [ id ] - inf_s_NaR ( v_val ) ) * exp ( -DT / tau_s_NaR ( v_val ) );
ion [ p_NaP_go ] [ id ] = inf_p_NaP ( v_val ) + ( ion [ p_NaP_go ] [ id ] - inf_p_NaP ( v_val ) ) * exp ( -DT / tau_p_NaP ( v_val ) );
ion [ n_KV_go ] [ id ] = inf_n_KV ( v_val ) + ( ion [ n_KV_go ] [ id ] - inf_n_KV ( v_val ) ) * exp ( -DT / tau_n_KV ( v_val ) );
ion [ a_KA_go ] [ id ] = inf_a_KA ( v_val ) + ( ion [ a_KA_go ] [ id ] - inf_a_KA ( v_val ) ) * exp ( -DT / tau_a_KA ( v_val ) );
ion [ b_KA_go ] [ id ] = inf_b_KA ( v_val ) + ( ion [ b_KA_go ] [ id ] - inf_b_KA ( v_val ) ) * exp ( -DT / tau_b_KA ( v_val ) );
ion [ c_KC_go ] [ id ] = inf_c_KC ( v_val , Ca_val )
+ (ion [ c_KC_go ] [ id ] - inf_c_KC ( v_val , Ca_val ) ) * exp ( -DT / tau_c_KC ( v_val , Ca_val ) );
ion [ sl_Kslow_go ] [ id ] = inf_sl_Kslow ( v_val ) + ( ion [ sl_Kslow_go ] [ id ] - inf_sl_Kslow ( v_val ) ) * exp ( -DT / tau_sl_Kslow ( v_val ) );
ion [ ch_CaHVA_go ] [ id ] = inf_ch_CaHVA( v_val ) + ( ion [ ch_CaHVA_go ] [ id ] - inf_ch_CaHVA ( v_val ) ) * exp ( -DT / tau_ch_CaHVA( v_val ) );
ion [ ci_CaHVA_go ] [ id ] = inf_ci_CaHVA( v_val ) + ( ion [ ci_CaHVA_go ] [ id ] - inf_ci_CaHVA ( v_val ) ) * exp ( -DT / tau_ci_CaHVA( v_val ) );
ion [ cl_CaLVA_go ] [ id ] = inf_cl_CaLVA( v_val ) + ( ion [ cl_CaLVA_go ] [ id ] - inf_cl_CaLVA ( v_val ) ) * exp ( -DT / tau_cl_CaLVA( v_val ) );
ion [ cm_CaLVA_go ] [ id ] = inf_cm_CaLVA( v_val ) + ( ion [ cm_CaLVA_go ] [ id ] - inf_cm_CaLVA ( v_val ) ) * exp ( -DT / tau_cm_CaLVA( v_val ) );
ion [ hf_HCN1_go ] [ id ] = inf_hf_HCN1( v_val ) + ( ion [ hf_HCN1_go ] [ id ] - inf_hf_HCN1 ( v_val ) ) * exp ( -DT / tau_hf_HCN1( v_val ) );
ion [ hf_HCN2_go ] [ id ] = inf_hf_HCN2( v_val ) + ( ion [ hf_HCN2_go ] [ id ] - inf_hf_HCN2 ( v_val ) ) * exp ( -DT / tau_hf_HCN2( v_val ) );
ion [ hs_HCN1_go ] [ id ] = inf_hs_HCN1( v_val ) + ( ion [ hs_HCN1_go ] [ id ] - inf_hs_HCN1 ( v_val ) ) * exp ( -DT / tau_hs_HCN1( v_val ) );
ion [ hs_HCN2_go ] [ id ] = inf_hs_HCN2( v_val ) + ( ion [ hs_HCN2_go ] [ id ] - inf_hs_HCN2 ( v_val ) ) * exp ( -DT / tau_hs_HCN2( v_val ) );
// integral
//elem [ Ca ] [ id ] = ( DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) + B_Ca1_GO * Ca1_0_GO) + Ca_val ) / (1.0 + DT * B_Ca1_GO);
// Euler
//double dCa = - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//elem [ Ca ] [ id ] += dCa * DT;
// RK4
double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
//double Cinf = 5e-5 - I_Ca2 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//go -> ca2 [ id ] = Cinf - (Cinf - Ca2_val) * exp ( - DT * 1.3);
// Ca2 Euler
//double dCa2 = - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//d_go -> ca2 [ id ] += dCa2 * DT;
// Ca2 RK4
k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
//double Cinf = 5e-5 - I_Ca1 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//elem [ Ca ] [ id ] = Cinf - (Cinf - Ca_val) * exp ( - DT * 1.3);
}
}
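// RKC-stage version of the explicit update: the gating voltage is read from the solver-provided elem_v
// stage vector and the Ca pools use forward Euler (the RK4 branch is kept commented out).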
__global__ void go_update_ion_RKC ( neuron_t *d_go, neuron_solve_t *d_go_solve, double *elem_v, const double DT )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_go -> elem;
double **ion = d_go -> ion;
double **cond = d_go -> cond;
if ( id < d_go -> nc)
{
//double v_val = elem [ v ] [ id ];
double v_val = elem_v [ id ];
double Ca_val = elem [ Ca ] [ id ];
double Ca2_val = d_go -> ca2 [ id ];
double I_Ca1 = 1e-3 * cond [ g_CaHVA_go ] [ id ] / elem [ area ] [ id ] * ion [ ch_CaHVA_go ] [ id ] *
ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ] * ( elem [ v ] [ id ] - V_Ca_GO ); // I_Ca [mA/cm^2]
double I_Ca2 = 1e-3 * cond [ g_CaLVA_go ] [ id ] / elem [ area ] [ id ] * ion [ cl_CaLVA_go ] [ id ] *
ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ] * ( elem [ v ] [ id ] - ( d_go -> rev_ca2 [ id ] ) ); // I_Ca [mA/cm^2]
ion [ m_NaT_go ] [ id ] = inf_m_NaT ( v_val ) + ( ion [ m_NaT_go ] [ id ] - inf_m_NaT ( v_val ) ) * exp ( -DT / tau_m_NaT ( v_val ) );
ion [ h_NaT_go ] [ id ] = inf_h_NaT ( v_val ) + ( ion [ h_NaT_go ] [ id ] - inf_h_NaT ( v_val ) ) * exp ( -DT / tau_h_NaT ( v_val ) );
ion [ r_NaR_go ] [ id ] = inf_r_NaR ( v_val ) + ( ion [ r_NaR_go ] [ id ] - inf_r_NaR ( v_val ) ) * exp ( -DT / tau_r_NaR ( v_val ) );
ion [ s_NaR_go ] [ id ] = inf_s_NaR ( v_val ) + ( ion [ s_NaR_go ] [ id ] - inf_s_NaR ( v_val ) ) * exp ( -DT / tau_s_NaR ( v_val ) );
ion [ p_NaP_go ] [ id ] = inf_p_NaP ( v_val ) + ( ion [ p_NaP_go ] [ id ] - inf_p_NaP ( v_val ) ) * exp ( -DT / tau_p_NaP ( v_val ) );
ion [ n_KV_go ] [ id ] = inf_n_KV ( v_val ) + ( ion [ n_KV_go ] [ id ] - inf_n_KV ( v_val ) ) * exp ( -DT / tau_n_KV ( v_val ) );
ion [ a_KA_go ] [ id ] = inf_a_KA ( v_val ) + ( ion [ a_KA_go ] [ id ] - inf_a_KA ( v_val ) ) * exp ( -DT / tau_a_KA ( v_val ) );
ion [ b_KA_go ] [ id ] = inf_b_KA ( v_val ) + ( ion [ b_KA_go ] [ id ] - inf_b_KA ( v_val ) ) * exp ( -DT / tau_b_KA ( v_val ) );
ion [ c_KC_go ] [ id ] = inf_c_KC ( v_val , Ca_val )
+ (ion [ c_KC_go ] [ id ] - inf_c_KC ( v_val , Ca_val ) ) * exp ( -DT / tau_c_KC ( v_val , Ca_val ) );
ion [ sl_Kslow_go ] [ id ] = inf_sl_Kslow ( v_val ) + ( ion [ sl_Kslow_go ] [ id ] - inf_sl_Kslow ( v_val ) ) * exp ( -DT / tau_sl_Kslow ( v_val ) );
ion [ ch_CaHVA_go ] [ id ] = inf_ch_CaHVA( v_val ) + ( ion [ ch_CaHVA_go ] [ id ] - inf_ch_CaHVA ( v_val ) ) * exp ( -DT / tau_ch_CaHVA( v_val ) );
ion [ ci_CaHVA_go ] [ id ] = inf_ci_CaHVA( v_val ) + ( ion [ ci_CaHVA_go ] [ id ] - inf_ci_CaHVA ( v_val ) ) * exp ( -DT / tau_ci_CaHVA( v_val ) );
ion [ cl_CaLVA_go ] [ id ] = inf_cl_CaLVA( v_val ) + ( ion [ cl_CaLVA_go ] [ id ] - inf_cl_CaLVA ( v_val ) ) * exp ( -DT / tau_cl_CaLVA( v_val ) );
ion [ cm_CaLVA_go ] [ id ] = inf_cm_CaLVA( v_val ) + ( ion [ cm_CaLVA_go ] [ id ] - inf_cm_CaLVA ( v_val ) ) * exp ( -DT / tau_cm_CaLVA( v_val ) );
ion [ hf_HCN1_go ] [ id ] = inf_hf_HCN1( v_val ) + ( ion [ hf_HCN1_go ] [ id ] - inf_hf_HCN1 ( v_val ) ) * exp ( -DT / tau_hf_HCN1( v_val ) );
ion [ hf_HCN2_go ] [ id ] = inf_hf_HCN2( v_val ) + ( ion [ hf_HCN2_go ] [ id ] - inf_hf_HCN2 ( v_val ) ) * exp ( -DT / tau_hf_HCN2( v_val ) );
ion [ hs_HCN1_go ] [ id ] = inf_hs_HCN1( v_val ) + ( ion [ hs_HCN1_go ] [ id ] - inf_hs_HCN1 ( v_val ) ) * exp ( -DT / tau_hs_HCN1( v_val ) );
ion [ hs_HCN2_go ] [ id ] = inf_hs_HCN2( v_val ) + ( ion [ hs_HCN2_go ] [ id ] - inf_hs_HCN2 ( v_val ) ) * exp ( -DT / tau_hs_HCN2( v_val ) );
// integral
//elem [ Ca ] [ id ] = ( DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) + B_Ca1_GO * Ca1_0_GO) + Ca_val ) / (1.0 + DT * B_Ca1_GO);
// Euler
double dCa = - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
elem [ Ca ] [ id ] += dCa * DT;
// RK4
//double k1 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//double k2 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k1 / 2.0 - Ca1_0_GO ) );
//double k3 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k2 / 2.0 - Ca1_0_GO ) );
//double k4 = DT * ( - I_Ca1 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca_val + k3 - Ca1_0_GO ) );
//elem [ Ca ] [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
//double Cinf = 5e-5 - I_Ca2 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//go -> ca2 [ id ] = Cinf - (Cinf - Ca2_val) * exp ( - DT * 1.3);
// Ca2 Euler
double dCa2 = - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
d_go -> ca2 [ id ] += dCa2 * DT;
// Ca2 RK4
//k1 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val - Ca1_0_GO ) ); //[mA*mol/cm^3*sec*A]=[M/sec] **[1mM = 1mol/m^3]**
//k2 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k1 / 2.0 - Ca1_0_GO ) );
//k3 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k2 / 2.0 - Ca1_0_GO ) );
//k4 = DT * ( - I_Ca2 / ( 2.0 * F_GO * SHELL1_D_GO ) - B_Ca1_GO * ( Ca2_val + k3 - Ca1_0_GO ) );
//d_go -> ca2 [ id ] += ( k1 + k2 * 2.0 + k3 * 2.0 + k4 ) / 6.0;
// Ca2 Vrev update
d_go -> rev_ca2 [ id ] = ( 1e3 ) * ( 8.313424 * ( 23.0 + 273.15 ) ) / (2 * F_GO ) * log ( Ca1OUT_GO / d_go -> ca2 [ id ] );//[mV]
//double Cinf = 5e-5 - I_Ca1 / ( 2.0 * 9.6485e4 * 0.2e-4 * 1.3 );
//elem [ Ca ] [ id ] = Cinf - (Cinf - Ca_val) * exp ( - DT * 1.3);
}
}
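// Host-side initialization: each cell gets a random resting-potential offset of up to +-1 mV (shared by
// all of its GO_COMP compartments), gates start at their steady-state values for that voltage, the Ca
// pools start at Ca1_0_GO, and the KAHP Markov states start fully in the closed state c1.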
__host__ void go_initialize_ion ( neuron_t *go )
{
double **elem = go -> elem;
double **ion = go -> ion;
double init_v_rand = 0.0;
for ( int i = 0; i < go -> nc; i++) {
if ( i % GO_COMP == 0 )
init_v_rand = ( ( double ) rand ( ) / RAND_MAX ) - 0.5;
elem [ v ] [ i ] = V_INIT_GO + 2.0 * init_v_rand;
elem [ Ca ] [ i ] = Ca1_0_GO;
go -> ca2 [ i ] = Ca1_0_GO;
go -> rev_ca2 [ i ] = V_Ca_GO;
go -> ca_old [ i ] = Ca1_0_GO;
elem [ i_ext ] [ i ] = 0.0;
double v_val = elem [ v ] [ i ];
double ca_val = elem [ Ca ] [ i ];
ion [ m_NaT_go ] [ i ] = inf_m_NaT ( v_val );
ion [ h_NaT_go ] [ i ] = inf_h_NaT ( v_val );
ion [ r_NaR_go ] [ i ] = inf_r_NaR ( v_val );
ion [ s_NaR_go ] [ i ] = inf_s_NaR ( v_val );
ion [ p_NaP_go ] [ i ] = inf_p_NaP ( v_val );
ion [ n_KV_go ] [ i ] = inf_n_KV ( v_val );
ion [ a_KA_go ] [ i ] = inf_a_KA ( v_val );
ion [ b_KA_go ] [ i ] = inf_b_KA ( v_val );
ion [ c_KC_go ] [ i ] = inf_c_KC ( v_val, ca_val );
ion [ sl_Kslow_go ] [ i ] = inf_sl_Kslow ( v_val );
ion [ ch_CaHVA_go ] [ i ] = inf_ch_CaHVA ( v_val );
ion [ ci_CaHVA_go ] [ i ] = inf_ci_CaHVA ( v_val );
ion [ cl_CaLVA_go ] [ i ] = inf_cl_CaLVA ( v_val );
ion [ cm_CaLVA_go ] [ i ] = inf_cm_CaLVA ( v_val );
ion [ hf_HCN1_go ] [ i ] = inf_hf_HCN1 ( v_val );
ion [ hf_HCN2_go ] [ i ] = inf_hf_HCN2 ( v_val );
ion [ hs_HCN1_go ] [ i ] = inf_hs_HCN1 ( v_val );
ion [ hs_HCN2_go ] [ i ] = inf_hs_HCN2 ( v_val );
ion [ o1_KAHP_go ] [ i ] = 0.0;
ion [ o2_KAHP_go ] [ i ] = 0.0;
ion [ c2_KAHP_go ] [ i ] = 0.0;
ion [ c3_KAHP_go ] [ i ] = 0.0;
ion [ c4_KAHP_go ] [ i ] = 0.0;
ion [ c1_KAHP_go ] [ i ] = 1.0;
}
}
|
cdf22921b83f19140f45cb0e88ca98c756b0d13d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define MASK_WIDTH 5
#define O_TILE_WIDTH 16
#define BLOCK_WIDTH (O_TILE_WIDTH + (MASK_WIDTH - 1))
#define clamp(x) (min(max((x), 0.0), 1.0))
//implement the tiled 2D convolution kernel with adjustments for channels
//use shared memory to reduce the number of global accesses; handle the boundary conditions when loading input tile elements into the shared memory
__global__ void convolution_2D_kernel(float *P, float *N, int imageHeight, int imageWidth, int channels, const float * __restrict__ M) {
__shared__ float N_ds[BLOCK_WIDTH][BLOCK_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int tempY = by * O_TILE_WIDTH + ty;
int tempX = bx * O_TILE_WIDTH + tx;
for (int k = 0; k < channels; k++) {
float accum = 0;
int offset = ty * O_TILE_WIDTH + tx;
int yOffset = offset / BLOCK_WIDTH;
int xOffset = offset % BLOCK_WIDTH;
int yIndex = by * O_TILE_WIDTH + yOffset - (MASK_WIDTH / 2);
int xIndex = bx * O_TILE_WIDTH + xOffset - (MASK_WIDTH / 2);
int index = (yIndex * imageWidth + xIndex) * channels + k;
if (yIndex >= 0 && yIndex < imageHeight &&
xIndex >= 0 && xIndex < imageWidth) {
N_ds[yOffset][xOffset] = N[index];
} else {
N_ds[yOffset][xOffset] = 0.0f;
}
offset = ty * O_TILE_WIDTH + tx + (O_TILE_WIDTH * O_TILE_WIDTH);
yOffset = offset / BLOCK_WIDTH;
xOffset = offset % BLOCK_WIDTH;
yIndex = by * O_TILE_WIDTH + yOffset - (MASK_WIDTH / 2);
xIndex = bx * O_TILE_WIDTH + xOffset - (MASK_WIDTH / 2);
index = (yIndex * imageWidth + xIndex) * channels + k;
if (yOffset < BLOCK_WIDTH && xOffset < BLOCK_WIDTH) {
if (xIndex >= 0 && xIndex < imageWidth &&
yIndex >= 0 && yIndex < imageHeight) {
N_ds[yOffset][xOffset] = N[index];
} else {
N_ds[yOffset][xOffset] = 0.0f;
}
} else {}
__syncthreads();
for (int i = 0; i < MASK_WIDTH; i++) {
for (int j = 0; j < MASK_WIDTH; j++) {
accum += N_ds[ty + i][tx + j] * M[i * MASK_WIDTH + j];
}
}
if (tempY < imageHeight && tempX < imageWidth) {
P[(tempY * imageWidth + tempX) * channels + k] = clamp(accum);
} else {}
__syncthreads();
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */
assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//allocate device memory
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//copy host memory to device
hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//initialize thread block and kernel grid dimensions
//invoke CUDA kernel
dim3 dimBlock(O_TILE_WIDTH, O_TILE_WIDTH, 1);
dim3 dimGrid(((imageWidth - 1) / O_TILE_WIDTH) + 1, ((imageHeight - 1) / O_TILE_WIDTH) + 1, 1);
hipLaunchKernelGGL(( convolution_2D_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutputImageData, deviceInputImageData, imageHeight, imageWidth, imageChannels, deviceMaskData);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//copy results from device to host
hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//deallocate device memory
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
cdf22921b83f19140f45cb0e88ca98c756b0d13d.cu
|
#include <wb.h>
#define MASK_WIDTH 5
#define O_TILE_WIDTH 16
#define BLOCK_WIDTH (O_TILE_WIDTH + (MASK_WIDTH - 1))
#define clamp(x) (min(max((x), 0.0), 1.0))
//implement the tiled 2D convolution kernel with adjustments for channels
//use shared memory to reduce the number of global accesses; handle the boundary conditions when loading input tile elements into the shared memory
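//note on the tile load below: the shared tile is BLOCK_WIDTH x BLOCK_WIDTH (20 x 20 = 400 floats) while the
//block has only O_TILE_WIDTH x O_TILE_WIDTH (16 x 16 = 256) threads, so each thread performs up to two
//strided loads per channel and out-of-range pixels are zero-padded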
__global__ void convolution_2D_kernel(float *P, float *N, int imageHeight, int imageWidth, int channels, const float * __restrict__ M) {
__shared__ float N_ds[BLOCK_WIDTH][BLOCK_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int tempY = by * O_TILE_WIDTH + ty;
int tempX = bx * O_TILE_WIDTH + tx;
for (int k = 0; k < channels; k++) {
float accum = 0;
int offset = ty * O_TILE_WIDTH + tx;
int yOffset = offset / BLOCK_WIDTH;
int xOffset = offset % BLOCK_WIDTH;
int yIndex = by * O_TILE_WIDTH + yOffset - (MASK_WIDTH / 2);
int xIndex = bx * O_TILE_WIDTH + xOffset - (MASK_WIDTH / 2);
int index = (yIndex * imageWidth + xIndex) * channels + k;
if (yIndex >= 0 && yIndex < imageHeight &&
xIndex >= 0 && xIndex < imageWidth) {
N_ds[yOffset][xOffset] = N[index];
} else {
N_ds[yOffset][xOffset] = 0.0f;
}
offset = ty * O_TILE_WIDTH + tx + (O_TILE_WIDTH * O_TILE_WIDTH);
yOffset = offset / BLOCK_WIDTH;
xOffset = offset % BLOCK_WIDTH;
yIndex = by * O_TILE_WIDTH + yOffset - (MASK_WIDTH / 2);
xIndex = bx * O_TILE_WIDTH + xOffset - (MASK_WIDTH / 2);
index = (yIndex * imageWidth + xIndex) * channels + k;
if (yOffset < BLOCK_WIDTH && xOffset < BLOCK_WIDTH) {
if (xIndex >= 0 && xIndex < imageWidth &&
yIndex >= 0 && yIndex < imageHeight) {
N_ds[yOffset][xOffset] = N[index];
} else {
N_ds[yOffset][xOffset] = 0.0f;
}
} else {}
__syncthreads();
for (int i = 0; i < MASK_WIDTH; i++) {
for (int j = 0; j < MASK_WIDTH; j++) {
accum += N_ds[ty + i][tx + j] * M[i * MASK_WIDTH + j];
}
}
if (tempY < imageHeight && tempX < imageWidth) {
P[(tempY * imageWidth + tempX) * channels + k] = clamp(accum);
} else {}
__syncthreads();
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */
assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//allocate device memory
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//copy host memory to device
cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//initialize thread block and kernel grid dimensions
//invoke CUDA kernel
dim3 dimBlock(O_TILE_WIDTH, O_TILE_WIDTH, 1);
dim3 dimGrid(((imageWidth - 1) / O_TILE_WIDTH) + 1, ((imageHeight - 1) / O_TILE_WIDTH) + 1, 1);
convolution_2D_kernel<<<dimGrid, dimBlock>>>(deviceOutputImageData, deviceInputImageData, imageHeight, imageWidth, imageChannels, deviceMaskData);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//copy results from device to host
cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//deallocate device memory
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
959fc20b417d3798ff7bd5efd53ee7f8ae0824c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/parquet/parquet_gpu.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/row_operators.cuh>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
initialize_chunk_hash_maps_kernel(device_span<EncColumnChunk> chunks)
{
auto chunk = chunks[blockIdx.x];
auto t = threadIdx.x;
// fut: Now that per-chunk dict is same size as ck.num_values, try to not use one block per chunk
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
new (&chunk.dict_map_slots[t + i].first) map_type::atomic_key_type{KEY_SENTINEL};
new (&chunk.dict_map_slots[t + i].second) map_type::atomic_mapped_type{VALUE_SENTINEL};
}
}
}
template <typename T>
struct equality_functor {
column_device_view const& col;
__device__ bool operator()(size_type lhs_idx, size_type rhs_idx)
{
// We don't call this for nulls so this is fine
return equality_compare(col.element<T>(lhs_idx), col.element<T>(rhs_idx));
}
};
template <typename T>
struct hash_functor {
column_device_view const& col;
__device__ auto operator()(size_type idx) { return MurmurHash3_32<T>{}(col.element<T>(idx)); }
};
struct map_insert_fn {
map_type::device_mutable_view& map;
template <typename T>
__device__ bool operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.insert(std::make_pair(i, i), hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to insert in map");
}
return false;
}
};
struct map_find_fn {
map_type::device_view& map;
template <typename T>
__device__ auto operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.find(i, hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to find in map");
}
return map.end();
}
};
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
auto start_row =
block_x *
    max_page_fragment_size;  // This is the fragment size; all chunks are a multiple of this many rows.
size_type end_row = min(start_row + max_page_fragment_size, num_rows);
__shared__ EncColumnChunk* s_chunk;
__shared__ parquet_column_device_view s_col;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_num_values;
if (t == 0) {
// Find the chunk this block is a part of
size_type num_rowgroups = chunks.size().first;
size_type rg_idx = 0;
while (rg_idx < num_rowgroups) {
if (auto ck = chunks[rg_idx][col_idx];
start_row >= ck.start_row and start_row < ck.start_row + ck.num_rows) {
break;
}
++rg_idx;
}
s_chunk = &chunks[rg_idx][col_idx];
s_col = *(s_chunk->col_desc);
}
__syncthreads();
if (not s_chunk->use_dictionary) { return; }
if (t == 0) {
// Find the bounds of values in leaf column to be inserted into the map for current chunk
auto col = *(s_col.parent_column);
auto start_value_idx = start_row;
auto end_value_idx = end_row;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
start_value_idx += col.offset();
end_value_idx += col.offset();
col = col.child(0);
} else {
auto offset_col = col.child(lists_column_view::offsets_column_index);
start_value_idx = offset_col.element<size_type>(start_value_idx + col.offset());
end_value_idx = offset_col.element<size_type>(end_value_idx + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s_start_value_idx = start_value_idx;
s_num_values = end_value_idx - start_value_idx;
}
__syncthreads();
column_device_view const& data_col = *s_col.leaf_column;
using block_reduce = hipcub::BlockReduce<size_type, block_size>;
__shared__ typename block_reduce::TempStorage reduce_storage;
// Make a view of the hash map
auto hash_map_mutable = map_type::device_mutable_view(
s_chunk->dict_map_slots, s_chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
auto hash_map = map_type::device_view(
s_chunk->dict_map_slots, s_chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ int total_num_dict_entries;
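  // Block-stride over this chunk's leaf values: each valid value is looked up
  // and, if absent, inserted into the chunk's hash map. Block-wide reductions
  // accumulate the count of unique entries and their encoded byte size, which
  // thread 0 adds to the chunk's running totals.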
for (size_type i = 0; i < s_num_values; i += block_size) {
// add the value to hash map
size_type val_idx = i + t + s_start_value_idx;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) and data_col.is_valid(val_idx);
// insert element at val_idx to hash map and count successful insertions
size_type is_unique = 0;
size_type uniq_elem_size = 0;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{hash_map}, data_col, val_idx);
if (found_slot == hash_map.end()) {
is_unique =
type_dispatcher(data_col.type(), map_insert_fn{hash_map_mutable}, data_col, val_idx);
uniq_elem_size = [&]() -> size_type {
if (not is_unique) { return 0; }
switch (s_col.physical_type) {
case Type::INT32: return 4;
case Type::INT64: return 8;
case Type::INT96: return 12;
case Type::FLOAT: return 4;
case Type::DOUBLE: return 8;
case Type::BYTE_ARRAY:
if (data_col.type().id() == type_id::STRING) {
// Strings are stored as 4 byte length + string bytes
return 4 + data_col.element<string_view>(val_idx).size_bytes();
}
case Type::FIXED_LEN_BYTE_ARRAY:
default: cudf_assert(false && "Unsupported type for dictionary encoding"); return 0;
}
}();
}
}
__syncthreads();
auto num_unique = block_reduce(reduce_storage).Sum(is_unique);
__syncthreads();
auto uniq_data_size = block_reduce(reduce_storage).Sum(uniq_elem_size);
if (t == 0) {
total_num_dict_entries = atomicAdd(&s_chunk->num_dict_entries, num_unique);
total_num_dict_entries += num_unique;
atomicAdd(&s_chunk->uniq_data_size, uniq_data_size);
}
__syncthreads();
// Check if the num unique values in chunk has already exceeded max dict size and early exit
if (total_num_dict_entries > MAX_DICT_SIZE) { return; }
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
collect_map_entries_kernel(device_span<EncColumnChunk> chunks)
{
auto& chunk = chunks[blockIdx.x];
if (not chunk.use_dictionary) { return; }
auto t = threadIdx.x;
auto map =
map_type::device_view(chunk.dict_map_slots, chunk.dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ size_type counter;
if (t == 0) counter = 0;
__syncthreads();
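  // Walk the map's slots in block-sized strides; every filled slot claims the
  // next dictionary index via atomicAdd, records its key in dict_data, and
  // stores that index back into the slot for later lookups.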
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
auto slot = map.begin_slot() + t + i;
auto key = static_cast<map_type::key_type>(slot->first);
if (key != KEY_SENTINEL) {
auto loc = atomicAdd(&counter, 1);
cudf_assert(loc < MAX_DICT_SIZE && "Number of filled slots exceeds max dict size");
chunk.dict_data[loc] = key;
// If sorting dict page ever becomes a hard requirement, enable the following statement and
// add a dict sorting step before storing into the slot's second field.
// chunk.dict_data_idx[loc] = t + i;
slot->second.store(loc);
// TODO: ^ This doesn't need to be atomic. Try casting to value_type ptr and just writing.
}
}
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
get_dictionary_indices_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
size_type start_row = block_x * max_page_fragment_size;
size_type end_row = min(start_row + max_page_fragment_size, num_rows);
__shared__ EncColumnChunk s_chunk;
__shared__ parquet_column_device_view s_col;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_ck_start_val_idx;
__shared__ size_type s_num_values;
if (t == 0) {
// Find the chunk this block is a part of
size_type num_rowgroups = chunks.size().first;
size_type rg_idx = 0;
while (rg_idx < num_rowgroups) {
if (auto ck = chunks[rg_idx][col_idx];
start_row >= ck.start_row and start_row < ck.start_row + ck.num_rows) {
break;
}
++rg_idx;
}
s_chunk = chunks[rg_idx][col_idx];
s_col = *(s_chunk.col_desc);
// Find the bounds of values in leaf column to be inserted into the map for current chunk
auto col = *(s_col.parent_column);
auto start_value_idx = start_row;
auto end_value_idx = end_row;
auto chunk_start_val_idx = s_chunk.start_row;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
start_value_idx += col.offset();
chunk_start_val_idx += col.offset();
end_value_idx += col.offset();
col = col.child(0);
} else {
auto offset_col = col.child(lists_column_view::offsets_column_index);
start_value_idx = offset_col.element<size_type>(start_value_idx + col.offset());
chunk_start_val_idx = offset_col.element<size_type>(chunk_start_val_idx + col.offset());
end_value_idx = offset_col.element<size_type>(end_value_idx + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s_start_value_idx = start_value_idx;
s_ck_start_val_idx = chunk_start_val_idx;
s_num_values = end_value_idx - start_value_idx;
}
__syncthreads();
if (not s_chunk.use_dictionary) { return; }
column_device_view const& data_col = *s_col.leaf_column;
auto map = map_type::device_view(
s_chunk.dict_map_slots, s_chunk.dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
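  // For every valid leaf value in this fragment, find its slot in the chunk's
  // map and write the stored dictionary index into dict_index, positioned
  // relative to the chunk's starting value.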
for (size_t i = 0; i < s_num_values; i += block_size) {
if (t + i < s_num_values) {
auto val_idx = s_start_value_idx + t + i;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) ? data_col.is_valid(val_idx) : false;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{map}, data_col, val_idx);
cudf_assert(found_slot != map.end() &&
"Unable to find value in map in dictionary index construction");
if (found_slot != map.end()) {
// No need for atomic as this is not going to be modified by any other thread
auto* val_ptr = reinterpret_cast<map_type::mapped_type*>(&found_slot->second);
s_chunk.dict_index[val_idx - s_ck_start_val_idx] = *val_ptr;
}
}
}
}
}
void initialize_chunk_hash_maps(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
hipLaunchKernelGGL(( initialize_chunk_hash_maps_kernel<block_size>)
, dim3(chunks.size()), dim3(block_size), 0, stream.value(), chunks);
}
void populate_chunk_hash_maps(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
auto const grid_x = cudf::detail::grid_1d(num_rows, max_page_fragment_size);
auto const num_columns = chunks.size().second;
dim3 const dim_grid(grid_x.num_blocks, num_columns);
hipLaunchKernelGGL(( populate_chunk_hash_maps_kernel<block_size>)
, dim3(dim_grid), dim3(block_size), 0, stream.value(), chunks, num_rows);
}
void collect_map_entries(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
hipLaunchKernelGGL(( collect_map_entries_kernel<block_size>), dim3(chunks.size()), dim3(block_size), 0, stream.value(), chunks);
}
void get_dictionary_indices(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
auto const grid_x = cudf::detail::grid_1d(num_rows, max_page_fragment_size);
auto const num_columns = chunks.size().second;
dim3 const dim_grid(grid_x.num_blocks, num_columns);
hipLaunchKernelGGL(( get_dictionary_indices_kernel<block_size>)
, dim3(dim_grid), dim3(block_size), 0, stream.value(), chunks, num_rows);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
959fc20b417d3798ff7bd5efd53ee7f8ae0824c7.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/parquet/parquet_gpu.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/row_operators.cuh>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
initialize_chunk_hash_maps_kernel(device_span<EncColumnChunk> chunks)
{
auto chunk = chunks[blockIdx.x];
auto t = threadIdx.x;
// fut: Now that per-chunk dict is same size as ck.num_values, try to not use one block per chunk
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
new (&chunk.dict_map_slots[t + i].first) map_type::atomic_key_type{KEY_SENTINEL};
new (&chunk.dict_map_slots[t + i].second) map_type::atomic_mapped_type{VALUE_SENTINEL};
}
}
}
template <typename T>
struct equality_functor {
column_device_view const& col;
__device__ bool operator()(size_type lhs_idx, size_type rhs_idx)
{
// We don't call this for nulls so this is fine
return equality_compare(col.element<T>(lhs_idx), col.element<T>(rhs_idx));
}
};
template <typename T>
struct hash_functor {
column_device_view const& col;
__device__ auto operator()(size_type idx) { return MurmurHash3_32<T>{}(col.element<T>(idx)); }
};
struct map_insert_fn {
map_type::device_mutable_view& map;
template <typename T>
__device__ bool operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.insert(std::make_pair(i, i), hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to insert in map");
}
return false;
}
};
struct map_find_fn {
map_type::device_view& map;
template <typename T>
__device__ auto operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.find(i, hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to find in map");
}
return map.end();
}
};
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
auto start_row =
block_x *
    max_page_fragment_size;  // This is the fragment size; all chunks are a multiple of this many rows.
size_type end_row = min(start_row + max_page_fragment_size, num_rows);
__shared__ EncColumnChunk* s_chunk;
__shared__ parquet_column_device_view s_col;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_num_values;
if (t == 0) {
// Find the chunk this block is a part of
size_type num_rowgroups = chunks.size().first;
size_type rg_idx = 0;
while (rg_idx < num_rowgroups) {
if (auto ck = chunks[rg_idx][col_idx];
start_row >= ck.start_row and start_row < ck.start_row + ck.num_rows) {
break;
}
++rg_idx;
}
s_chunk = &chunks[rg_idx][col_idx];
s_col = *(s_chunk->col_desc);
}
__syncthreads();
if (not s_chunk->use_dictionary) { return; }
if (t == 0) {
// Find the bounds of values in leaf column to be inserted into the map for current chunk
auto col = *(s_col.parent_column);
auto start_value_idx = start_row;
auto end_value_idx = end_row;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
start_value_idx += col.offset();
end_value_idx += col.offset();
col = col.child(0);
} else {
auto offset_col = col.child(lists_column_view::offsets_column_index);
start_value_idx = offset_col.element<size_type>(start_value_idx + col.offset());
end_value_idx = offset_col.element<size_type>(end_value_idx + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s_start_value_idx = start_value_idx;
s_num_values = end_value_idx - start_value_idx;
}
__syncthreads();
column_device_view const& data_col = *s_col.leaf_column;
using block_reduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename block_reduce::TempStorage reduce_storage;
// Make a view of the hash map
auto hash_map_mutable = map_type::device_mutable_view(
s_chunk->dict_map_slots, s_chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
auto hash_map = map_type::device_view(
s_chunk->dict_map_slots, s_chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ int total_num_dict_entries;
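  // Block-stride over this chunk's leaf values: each valid value is looked up
  // and, if absent, inserted into the chunk's hash map. Block-wide reductions
  // accumulate the count of unique entries and their encoded byte size, which
  // thread 0 adds to the chunk's running totals.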
for (size_type i = 0; i < s_num_values; i += block_size) {
// add the value to hash map
size_type val_idx = i + t + s_start_value_idx;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) and data_col.is_valid(val_idx);
// insert element at val_idx to hash map and count successful insertions
size_type is_unique = 0;
size_type uniq_elem_size = 0;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{hash_map}, data_col, val_idx);
if (found_slot == hash_map.end()) {
is_unique =
type_dispatcher(data_col.type(), map_insert_fn{hash_map_mutable}, data_col, val_idx);
uniq_elem_size = [&]() -> size_type {
if (not is_unique) { return 0; }
switch (s_col.physical_type) {
case Type::INT32: return 4;
case Type::INT64: return 8;
case Type::INT96: return 12;
case Type::FLOAT: return 4;
case Type::DOUBLE: return 8;
case Type::BYTE_ARRAY:
if (data_col.type().id() == type_id::STRING) {
// Strings are stored as 4 byte length + string bytes
return 4 + data_col.element<string_view>(val_idx).size_bytes();
}
case Type::FIXED_LEN_BYTE_ARRAY:
default: cudf_assert(false && "Unsupported type for dictionary encoding"); return 0;
}
}();
}
}
__syncthreads();
auto num_unique = block_reduce(reduce_storage).Sum(is_unique);
__syncthreads();
auto uniq_data_size = block_reduce(reduce_storage).Sum(uniq_elem_size);
if (t == 0) {
total_num_dict_entries = atomicAdd(&s_chunk->num_dict_entries, num_unique);
total_num_dict_entries += num_unique;
atomicAdd(&s_chunk->uniq_data_size, uniq_data_size);
}
__syncthreads();
// Check if the num unique values in chunk has already exceeded max dict size and early exit
if (total_num_dict_entries > MAX_DICT_SIZE) { return; }
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
collect_map_entries_kernel(device_span<EncColumnChunk> chunks)
{
auto& chunk = chunks[blockIdx.x];
if (not chunk.use_dictionary) { return; }
auto t = threadIdx.x;
auto map =
map_type::device_view(chunk.dict_map_slots, chunk.dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ size_type counter;
if (t == 0) counter = 0;
__syncthreads();
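  // Walk the map's slots in block-sized strides; every filled slot claims the
  // next dictionary index via atomicAdd, records its key in dict_data, and
  // stores that index back into the slot for later lookups.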
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
auto slot = map.begin_slot() + t + i;
auto key = static_cast<map_type::key_type>(slot->first);
if (key != KEY_SENTINEL) {
auto loc = atomicAdd(&counter, 1);
cudf_assert(loc < MAX_DICT_SIZE && "Number of filled slots exceeds max dict size");
chunk.dict_data[loc] = key;
// If sorting dict page ever becomes a hard requirement, enable the following statement and
// add a dict sorting step before storing into the slot's second field.
// chunk.dict_data_idx[loc] = t + i;
slot->second.store(loc);
// TODO: ^ This doesn't need to be atomic. Try casting to value_type ptr and just writing.
}
}
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
get_dictionary_indices_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
size_type start_row = block_x * max_page_fragment_size;
size_type end_row = min(start_row + max_page_fragment_size, num_rows);
__shared__ EncColumnChunk s_chunk;
__shared__ parquet_column_device_view s_col;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_ck_start_val_idx;
__shared__ size_type s_num_values;
if (t == 0) {
// Find the chunk this block is a part of
size_type num_rowgroups = chunks.size().first;
size_type rg_idx = 0;
while (rg_idx < num_rowgroups) {
if (auto ck = chunks[rg_idx][col_idx];
start_row >= ck.start_row and start_row < ck.start_row + ck.num_rows) {
break;
}
++rg_idx;
}
s_chunk = chunks[rg_idx][col_idx];
s_col = *(s_chunk.col_desc);
// Find the bounds of values in leaf column to be inserted into the map for current chunk
auto col = *(s_col.parent_column);
auto start_value_idx = start_row;
auto end_value_idx = end_row;
auto chunk_start_val_idx = s_chunk.start_row;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
start_value_idx += col.offset();
chunk_start_val_idx += col.offset();
end_value_idx += col.offset();
col = col.child(0);
} else {
auto offset_col = col.child(lists_column_view::offsets_column_index);
start_value_idx = offset_col.element<size_type>(start_value_idx + col.offset());
chunk_start_val_idx = offset_col.element<size_type>(chunk_start_val_idx + col.offset());
end_value_idx = offset_col.element<size_type>(end_value_idx + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s_start_value_idx = start_value_idx;
s_ck_start_val_idx = chunk_start_val_idx;
s_num_values = end_value_idx - start_value_idx;
}
__syncthreads();
if (not s_chunk.use_dictionary) { return; }
column_device_view const& data_col = *s_col.leaf_column;
auto map = map_type::device_view(
s_chunk.dict_map_slots, s_chunk.dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
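  // For every valid leaf value in this fragment, find its slot in the chunk's
  // map and write the stored dictionary index into dict_index, positioned
  // relative to the chunk's starting value.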
for (size_t i = 0; i < s_num_values; i += block_size) {
if (t + i < s_num_values) {
auto val_idx = s_start_value_idx + t + i;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) ? data_col.is_valid(val_idx) : false;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{map}, data_col, val_idx);
cudf_assert(found_slot != map.end() &&
"Unable to find value in map in dictionary index construction");
if (found_slot != map.end()) {
// No need for atomic as this is not going to be modified by any other thread
auto* val_ptr = reinterpret_cast<map_type::mapped_type*>(&found_slot->second);
s_chunk.dict_index[val_idx - s_ck_start_val_idx] = *val_ptr;
}
}
}
}
}
void initialize_chunk_hash_maps(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
initialize_chunk_hash_maps_kernel<block_size>
<<<chunks.size(), block_size, 0, stream.value()>>>(chunks);
}
void populate_chunk_hash_maps(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
auto const grid_x = cudf::detail::grid_1d(num_rows, max_page_fragment_size);
auto const num_columns = chunks.size().second;
dim3 const dim_grid(grid_x.num_blocks, num_columns);
populate_chunk_hash_maps_kernel<block_size>
<<<dim_grid, block_size, 0, stream.value()>>>(chunks, num_rows);
}
void collect_map_entries(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
collect_map_entries_kernel<block_size><<<chunks.size(), block_size, 0, stream.value()>>>(chunks);
}
void get_dictionary_indices(cudf::detail::device_2dspan<EncColumnChunk> chunks,
size_type num_rows,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
auto const grid_x = cudf::detail::grid_1d(num_rows, max_page_fragment_size);
auto const num_columns = chunks.size().second;
dim3 const dim_grid(grid_x.num_blocks, num_columns);
get_dictionary_indices_kernel<block_size>
<<<dim_grid, block_size, 0, stream.value()>>>(chunks, num_rows);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
ad2fee8b9f600e854d0dfa62b44f3391b4e89247.hip
|
// !!! This is a file automatically generated by hipify!!!
//------------------------------------------------------------------------------
//
// Name: vadd.cu
//
// Purpose: CUDA implementation of VADD
//
// HISTORY: Written by Tom Deakin and Simon McIntosh-Smith, August 2013
//
//------------------------------------------------------------------------------
#include <stdio.h>
#include <hip/hip_runtime.h>
#define TOL (0.001) // tolerance used in floating point comparisons
#define LENGTH (1024) // length of vectors a, b, and c
/*************************************************************************************
* CUDA kernel
************************************************************************************/
__global__ void vadd(const float* a, const float* b, float* c, const unsigned int count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < count)
{
c[i] = a[i] + b[i];
}
}
int main()
{
float h_a[LENGTH]; // a vector
float h_b[LENGTH]; // b vector
float h_c[LENGTH]; // c vector (a+b) returned from the compute device
float *d_a, *d_b, *d_c; // CUDA memory
unsigned int correct; // number of correct results
// Fill vectors a and b with random float values
int i = 0;
int count = LENGTH;
for (i = 0; i < count; i++)
{
h_a[i] = rand() / (float)RAND_MAX;
h_b[i] = rand() / (float)RAND_MAX;
}
// Allocate CUDA memory
hipMalloc(&d_a, sizeof(float) * LENGTH);
hipMalloc(&d_b, sizeof(float) * LENGTH);
hipMalloc(&d_c, sizeof(float) * LENGTH);
// Write buffers a and b to GPU memory
hipMemcpy(d_a, h_a, sizeof(float) * LENGTH, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(float) * LENGTH, hipMemcpyHostToDevice);
dim3 numBlocks(LENGTH);
dim3 numThreads(1);
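    // Note: this configuration launches LENGTH blocks of a single thread each,
    // so every element is handled by its own block.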
hipLaunchKernelGGL(( vadd), dim3(numBlocks), dim3(numThreads), 0, 0, d_a, d_b, d_c, LENGTH);
// Copy result array back to host memory
hipMemcpy(h_c, d_c, sizeof(float) * LENGTH, hipMemcpyDeviceToHost);
// Test the results
correct = 0;
float tmp;
for (i = 0; i < count; i++)
{
tmp = h_a[i] + h_b[i]; // assign element i of a+b to tmp
tmp -= h_c[i]; // compute deviation of expected and output result
if (tmp * tmp < TOL * TOL) // correct if square deviation is less than tolerance squared
correct++;
else
{
printf(" tmp %f h_a %f h_b %f h_c %f \n", tmp, h_a[i], h_b[i], h_c[i]);
}
}
// summarize results
printf("C = A+B: %d out of %d results were correct.\n", correct, count);
return EXIT_SUCCESS;
}
|
ad2fee8b9f600e854d0dfa62b44f3391b4e89247.cu
|
//------------------------------------------------------------------------------
//
// Name: vadd.cu
//
// Purpose: CUDA implementation of VADD
//
// HISTORY: Written by Tom Deakin and Simon McIntosh-Smith, August 2013
//
//------------------------------------------------------------------------------
#include <stdio.h>
#include <cuda.h>
#define TOL (0.001) // tolerance used in floating point comparisons
#define LENGTH (1024) // length of vectors a, b, and c
/*************************************************************************************
* CUDA kernel
************************************************************************************/
__global__ void vadd(const float* a, const float* b, float* c, const unsigned int count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < count)
{
c[i] = a[i] + b[i];
}
}
int main()
{
float h_a[LENGTH]; // a vector
float h_b[LENGTH]; // b vector
float h_c[LENGTH]; // c vector (a+b) returned from the compute device
float *d_a, *d_b, *d_c; // CUDA memory
unsigned int correct; // number of correct results
// Fill vectors a and b with random float values
int i = 0;
int count = LENGTH;
for (i = 0; i < count; i++)
{
h_a[i] = rand() / (float)RAND_MAX;
h_b[i] = rand() / (float)RAND_MAX;
}
// Allocate CUDA memory
cudaMalloc(&d_a, sizeof(float) * LENGTH);
cudaMalloc(&d_b, sizeof(float) * LENGTH);
cudaMalloc(&d_c, sizeof(float) * LENGTH);
// Write buffers a and b to GPU memory
cudaMemcpy(d_a, h_a, sizeof(float) * LENGTH, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(float) * LENGTH, cudaMemcpyHostToDevice);
dim3 numBlocks(LENGTH);
dim3 numThreads(1);
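    // Note: this configuration launches LENGTH blocks of a single thread each,
    // so every element is handled by its own block.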
vadd<<<numBlocks, numThreads>>>(d_a, d_b, d_c, LENGTH);
// Copy result array back to host memory
cudaMemcpy(h_c, d_c, sizeof(float) * LENGTH, cudaMemcpyDeviceToHost);
// Test the results
correct = 0;
float tmp;
for (i = 0; i < count; i++)
{
tmp = h_a[i] + h_b[i]; // assign element i of a+b to tmp
tmp -= h_c[i]; // compute deviation of expected and output result
if (tmp * tmp < TOL * TOL) // correct if square deviation is less than tolerance squared
correct++;
else
{
printf(" tmp %f h_a %f h_b %f h_c %f \n", tmp, h_a[i], h_b[i], h_c[i]);
}
}
// summarize results
printf("C = A+B: %d out of %d results were correct.\n", correct, count);
return EXIT_SUCCESS;
}
|
97f148aeae897fc46a694b2e7260bebc90ad4617.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cule/cule.hpp>
#include <cule/cuda.hpp>
#include <torchcule/atari_env.hpp>
#include <torchcule/atari_state.hpp>
#include <torchcule/atari_state.cpp>
using cule_policy = cule::cuda::parallel_execution_policy;
AtariEnv::
AtariEnv(const cule::atari::rom& cart,
const size_t num_envs,
const size_t noop_reset_steps)
: super_t(cart, num_envs, noop_reset_steps),
use_cuda(false),
gpu_id(-1),
cule_par(new agency::parallel_execution_policy())
{
}
AtariEnv::
~AtariEnv()
{
if(use_cuda)
{
delete &get_policy<cule_policy>();
}
else
{
delete &get_policy<agency::parallel_execution_policy>();
}
}
void
AtariEnv::
reset(uint32_t* seedBuffer)
{
if(use_cuda)
{
super_t::reset(get_policy<cule_policy>(), seedBuffer);
}
else
{
super_t::reset(get_policy<agency::parallel_execution_policy>(), seedBuffer);
}
}
void
AtariEnv::
reset_states()
{
if(use_cuda)
{
super_t::reset_states(get_policy<cule_policy>());
}
else
{
super_t::reset_states(get_policy<agency::parallel_execution_policy>());
}
}
void
AtariEnv::
get_states(const size_t num_states,
const int32_t* indices_ptr,
AtariState* states_ptr)
{
agency::vector<cule::atari::state> output_states(num_states);
agency::vector<cule::atari::frame_state> output_frame_states(num_states);
agency::vector<uint8_t> output_states_ram(256 * num_states);
if(use_cuda)
{
agency::vector<cule::atari::state, agency::cuda::allocator<cule::atari::state>> output_states_gpu(num_states);
agency::vector<cule::atari::frame_state, agency::cuda::allocator<cule::atari::frame_state>> output_frame_states_gpu(num_states);
agency::vector<uint8_t, agency::cuda::allocator<uint8_t>> output_states_ram_gpu(256 * num_states);
agency::vector<int32_t, agency::cuda::allocator<int32_t>> indices_gpu(indices_ptr, indices_ptr + num_states);
super_t::get_states(get_policy<cule_policy>(), num_states, indices_gpu.data(), output_states_gpu.data(), output_frame_states_gpu.data(), output_states_ram_gpu.data());
get_policy<cule_policy>().sync();
agency::detail::copy(get_policy<agency::parallel_execution_policy>(), output_states_gpu.begin(), output_states_gpu.end(), output_states.begin());
agency::detail::copy(get_policy<agency::parallel_execution_policy>(), output_frame_states_gpu.begin(), output_frame_states_gpu.end(), output_frame_states.begin());
agency::detail::copy(get_policy<agency::parallel_execution_policy>(), output_states_ram_gpu.begin(), output_states_ram_gpu.end(), output_states_ram.begin());
get_policy<cule_policy>().sync();
}
else
{
super_t::get_states(get_policy<agency::parallel_execution_policy>(), num_states, indices_ptr, output_states.data(), output_frame_states.data(), nullptr);
}
agency::bulk_invoke(get_policy<agency::parallel_execution_policy>()(num_states),
decode_states_functor{},
use_cuda,
this->cart,
states_ptr,
output_states.data(),
output_frame_states.data(),
output_states_ram.data());
}
void
AtariEnv::
set_states(const size_t num_states,
const int32_t* indices_ptr,
const AtariState* states_ptr)
{
agency::vector<cule::atari::state> input_states(num_states);
agency::vector<cule::atari::frame_state> input_frame_states(num_states);
agency::vector<uint8_t> input_states_ram(256 * num_states);
agency::bulk_invoke(agency::seq(num_states),
encode_states_functor{},
this->cart,
states_ptr,
input_states.data(),
input_frame_states.data(),
input_states_ram.data());
if(use_cuda)
{
agency::vector<cule::atari::state, agency::cuda::allocator<cule::atari::state>> input_states_gpu(input_states.begin(), input_states.end());
agency::vector<cule::atari::frame_state, agency::cuda::allocator<cule::atari::frame_state>> input_frame_states_gpu(input_frame_states.begin(), input_frame_states.end());
agency::vector<uint8_t, agency::cuda::allocator<uint8_t>> input_states_ram_gpu(input_states_ram.begin(), input_states_ram.end());
agency::vector<int32_t, agency::cuda::allocator<int32_t>> indices_gpu(indices_ptr, indices_ptr + num_states);
super_t::set_states(get_policy<cule_policy>(), num_states, indices_gpu.data(), input_states_gpu.data(),
input_frame_states_gpu.data(), input_states_ram_gpu.data());
get_policy<cule_policy>().sync();
}
else
{
super_t::set_states(get_policy<agency::parallel_execution_policy>(), num_states, indices_ptr,
input_states.data(), input_frame_states.data(), input_states_ram.data());
}
}
void
AtariEnv::
step(const bool fire_reset,
const cule::atari::Action* actionsBuffer,
uint8_t* doneBuffer)
{
if(use_cuda)
{
super_t::step(get_policy<cule_policy>(), fire_reset, actionsBuffer, doneBuffer);
}
else
{
super_t::step(get_policy<agency::parallel_execution_policy>(), fire_reset, actionsBuffer, doneBuffer);
}
}
void
AtariEnv::
get_data(const bool episodic_life,
uint8_t* doneBuffer,
int32_t* rewardsBuffer,
int32_t* livesBuffer)
{
if(use_cuda)
{
super_t::get_data(get_policy<cule_policy>(), episodic_life, doneBuffer, rewardsBuffer, livesBuffer);
}
else
{
super_t::get_data(get_policy<agency::parallel_execution_policy>(), episodic_life, doneBuffer, rewardsBuffer, livesBuffer);
}
}
void
AtariEnv::
two_step(const cule::atari::Action* playerABuffer,
const cule::atari::Action* playerBBuffer)
{
if(use_cuda)
{
super_t::two_step(get_policy<cule_policy>(), playerABuffer, playerBBuffer);
}
else
{
super_t::two_step(get_policy<agency::parallel_execution_policy>(), playerABuffer, playerBBuffer);
}
}
void
AtariEnv::
generate_frames(const bool rescale,
const bool last_frame,
const size_t num_channels,
uint8_t* imageBuffer)
{
if(use_cuda)
{
super_t::generate_frames(get_policy<cule_policy>(), rescale, last_frame, num_channels, imageBuffer);
}
else
{
super_t::generate_frames(get_policy<agency::parallel_execution_policy>(), rescale, last_frame, num_channels, imageBuffer);
}
}
void
AtariEnv::
generate_random_actions(cule::atari::Action* actionBuffer)
{
if(use_cuda)
{
super_t::generate_random_actions(get_policy<cule_policy>(), actionBuffer);
}
else
{
super_t::generate_random_actions(get_policy<agency::parallel_execution_policy>(), actionBuffer);
}
}
void
AtariEnv::
sync_other_stream(hipStream_t& stream)
{
if(use_cuda)
{
get_policy<cule_policy>().insert_other_stream(stream);
}
}
void
AtariEnv::
sync_this_stream(hipStream_t& stream)
{
if(use_cuda)
{
get_policy<cule_policy>().insert_this_stream(stream);
}
}
void
AtariEnv::
set_cuda(const bool use_cuda, const int32_t gpu_id)
{
if(this->use_cuda != use_cuda)
{
this->use_cuda = use_cuda;
if(use_cuda)
{
this->gpu_id = gpu_id;
CULE_ERRCHK(hipSetDevice(gpu_id));
delete &get_policy<agency::parallel_execution_policy>();
cule_par = new cule_policy();
}
else
{
this->gpu_id = -1;
delete &get_policy<cule_policy>();
cule_par = new agency::parallel_execution_policy();
}
}
}
template<typename ExecutionPolicy>
ExecutionPolicy&
AtariEnv::
get_policy()
{
assert(cule_par != nullptr);
if(gpu_id != -1)
{
CULE_ERRCHK(hipSetDevice(gpu_id));
}
return *reinterpret_cast<ExecutionPolicy*>(cule_par);
}
size_t
AtariEnv::
state_size()
{
return sizeof(cule::atari::state);
}
size_t
AtariEnv::
frame_state_size()
{
return sizeof(cule::atari::frame_state);
}
size_t
AtariEnv::
tia_update_size()
{
return cule::atari::ENV_UPDATE_SIZE;
}
#include <cule/atari/rom.cpp>
#include <cule/atari/wrapper.cpp>
|
97f148aeae897fc46a694b2e7260bebc90ad4617.cu
|
#include <cule/cule.hpp>
#include <cule/cuda.hpp>
#include <torchcule/atari_env.hpp>
#include <torchcule/atari_state.hpp>
#include <torchcule/atari_state.cpp>
using cule_policy = cule::cuda::parallel_execution_policy;
AtariEnv::
AtariEnv(const cule::atari::rom& cart,
const size_t num_envs,
const size_t noop_reset_steps)
: super_t(cart, num_envs, noop_reset_steps),
use_cuda(false),
gpu_id(-1),
cule_par(new agency::parallel_execution_policy())
{
}
AtariEnv::
~AtariEnv()
{
if(use_cuda)
{
delete &get_policy<cule_policy>();
}
else
{
delete &get_policy<agency::parallel_execution_policy>();
}
}
void
AtariEnv::
reset(uint32_t* seedBuffer)
{
if(use_cuda)
{
super_t::reset(get_policy<cule_policy>(), seedBuffer);
}
else
{
super_t::reset(get_policy<agency::parallel_execution_policy>(), seedBuffer);
}
}
void
AtariEnv::
reset_states()
{
if(use_cuda)
{
super_t::reset_states(get_policy<cule_policy>());
}
else
{
super_t::reset_states(get_policy<agency::parallel_execution_policy>());
}
}
void
AtariEnv::
get_states(const size_t num_states,
const int32_t* indices_ptr,
AtariState* states_ptr)
{
agency::vector<cule::atari::state> output_states(num_states);
agency::vector<cule::atari::frame_state> output_frame_states(num_states);
agency::vector<uint8_t> output_states_ram(256 * num_states);
if(use_cuda)
{
agency::vector<cule::atari::state, agency::cuda::allocator<cule::atari::state>> output_states_gpu(num_states);
agency::vector<cule::atari::frame_state, agency::cuda::allocator<cule::atari::frame_state>> output_frame_states_gpu(num_states);
agency::vector<uint8_t, agency::cuda::allocator<uint8_t>> output_states_ram_gpu(256 * num_states);
agency::vector<int32_t, agency::cuda::allocator<int32_t>> indices_gpu(indices_ptr, indices_ptr + num_states);
super_t::get_states(get_policy<cule_policy>(), num_states, indices_gpu.data(), output_states_gpu.data(), output_frame_states_gpu.data(), output_states_ram_gpu.data());
get_policy<cule_policy>().sync();
agency::detail::copy(get_policy<agency::parallel_execution_policy>(), output_states_gpu.begin(), output_states_gpu.end(), output_states.begin());
agency::detail::copy(get_policy<agency::parallel_execution_policy>(), output_frame_states_gpu.begin(), output_frame_states_gpu.end(), output_frame_states.begin());
agency::detail::copy(get_policy<agency::parallel_execution_policy>(), output_states_ram_gpu.begin(), output_states_ram_gpu.end(), output_states_ram.begin());
get_policy<cule_policy>().sync();
}
else
{
super_t::get_states(get_policy<agency::parallel_execution_policy>(), num_states, indices_ptr, output_states.data(), output_frame_states.data(), nullptr);
}
agency::bulk_invoke(get_policy<agency::parallel_execution_policy>()(num_states),
decode_states_functor{},
use_cuda,
this->cart,
states_ptr,
output_states.data(),
output_frame_states.data(),
output_states_ram.data());
}
void
AtariEnv::
set_states(const size_t num_states,
const int32_t* indices_ptr,
const AtariState* states_ptr)
{
agency::vector<cule::atari::state> input_states(num_states);
agency::vector<cule::atari::frame_state> input_frame_states(num_states);
agency::vector<uint8_t> input_states_ram(256 * num_states);
agency::bulk_invoke(agency::seq(num_states),
encode_states_functor{},
this->cart,
states_ptr,
input_states.data(),
input_frame_states.data(),
input_states_ram.data());
if(use_cuda)
{
agency::vector<cule::atari::state, agency::cuda::allocator<cule::atari::state>> input_states_gpu(input_states.begin(), input_states.end());
agency::vector<cule::atari::frame_state, agency::cuda::allocator<cule::atari::frame_state>> input_frame_states_gpu(input_frame_states.begin(), input_frame_states.end());
agency::vector<uint8_t, agency::cuda::allocator<uint8_t>> input_states_ram_gpu(input_states_ram.begin(), input_states_ram.end());
agency::vector<int32_t, agency::cuda::allocator<int32_t>> indices_gpu(indices_ptr, indices_ptr + num_states);
super_t::set_states(get_policy<cule_policy>(), num_states, indices_gpu.data(), input_states_gpu.data(),
input_frame_states_gpu.data(), input_states_ram_gpu.data());
get_policy<cule_policy>().sync();
}
else
{
super_t::set_states(get_policy<agency::parallel_execution_policy>(), num_states, indices_ptr,
input_states.data(), input_frame_states.data(), input_states_ram.data());
}
}
void
AtariEnv::
step(const bool fire_reset,
const cule::atari::Action* actionsBuffer,
uint8_t* doneBuffer)
{
if(use_cuda)
{
super_t::step(get_policy<cule_policy>(), fire_reset, actionsBuffer, doneBuffer);
}
else
{
super_t::step(get_policy<agency::parallel_execution_policy>(), fire_reset, actionsBuffer, doneBuffer);
}
}
void
AtariEnv::
get_data(const bool episodic_life,
uint8_t* doneBuffer,
int32_t* rewardsBuffer,
int32_t* livesBuffer)
{
if(use_cuda)
{
super_t::get_data(get_policy<cule_policy>(), episodic_life, doneBuffer, rewardsBuffer, livesBuffer);
}
else
{
super_t::get_data(get_policy<agency::parallel_execution_policy>(), episodic_life, doneBuffer, rewardsBuffer, livesBuffer);
}
}
void
AtariEnv::
two_step(const cule::atari::Action* playerABuffer,
const cule::atari::Action* playerBBuffer)
{
if(use_cuda)
{
super_t::two_step(get_policy<cule_policy>(), playerABuffer, playerBBuffer);
}
else
{
super_t::two_step(get_policy<agency::parallel_execution_policy>(), playerABuffer, playerBBuffer);
}
}
void
AtariEnv::
generate_frames(const bool rescale,
const bool last_frame,
const size_t num_channels,
uint8_t* imageBuffer)
{
if(use_cuda)
{
super_t::generate_frames(get_policy<cule_policy>(), rescale, last_frame, num_channels, imageBuffer);
}
else
{
super_t::generate_frames(get_policy<agency::parallel_execution_policy>(), rescale, last_frame, num_channels, imageBuffer);
}
}
void
AtariEnv::
generate_random_actions(cule::atari::Action* actionBuffer)
{
if(use_cuda)
{
super_t::generate_random_actions(get_policy<cule_policy>(), actionBuffer);
}
else
{
super_t::generate_random_actions(get_policy<agency::parallel_execution_policy>(), actionBuffer);
}
}
void
AtariEnv::
sync_other_stream(cudaStream_t& stream)
{
if(use_cuda)
{
get_policy<cule_policy>().insert_other_stream(stream);
}
}
void
AtariEnv::
sync_this_stream(cudaStream_t& stream)
{
if(use_cuda)
{
get_policy<cule_policy>().insert_this_stream(stream);
}
}
void
AtariEnv::
set_cuda(const bool use_cuda, const int32_t gpu_id)
{
if(this->use_cuda != use_cuda)
{
this->use_cuda = use_cuda;
if(use_cuda)
{
this->gpu_id = gpu_id;
CULE_ERRCHK(cudaSetDevice(gpu_id));
delete &get_policy<agency::parallel_execution_policy>();
cule_par = new cule_policy();
}
else
{
this->gpu_id = -1;
delete &get_policy<cule_policy>();
cule_par = new agency::parallel_execution_policy();
}
}
}
template<typename ExecutionPolicy>
ExecutionPolicy&
AtariEnv::
get_policy()
{
assert(cule_par != nullptr);
if(gpu_id != -1)
{
CULE_ERRCHK(cudaSetDevice(gpu_id));
}
return *reinterpret_cast<ExecutionPolicy*>(cule_par);
}
size_t
AtariEnv::
state_size()
{
return sizeof(cule::atari::state);
}
size_t
AtariEnv::
frame_state_size()
{
return sizeof(cule::atari::frame_state);
}
size_t
AtariEnv::
tia_update_size()
{
return cule::atari::ENV_UPDATE_SIZE;
}
#include <cule/atari/rom.cpp>
#include <cule/atari/wrapper.cpp>
|
21d56f546c365a33e55126e59f602d26ab92914b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
21d56f546c365a33e55126e59f602d26ab92914b.cu
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
9d4705a66892cfe357f8c4146f4d716ed88e1d9e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// write your code into this file
// your kernels can be implemented directly here, or included
// function solveGPU is a device function: it can allocate memory, call CUDA kernels etc.
#define DEBUGNUMFPU 32
#define BLOCK 128
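// One simulation step over the n x n grid (reading of the state transitions below):
// a positive cell counts down its remaining infectious days and flips to -30 (immune)
// when it reaches zero, a negative cell counts its immunity back up towards zero, and
// a susceptible cell (value 0) becomes infected (set to 10) once the number of infected
// neighbours exceeds its `contacts` limit, incrementing infections[iter].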
__global__ void GPUiter( const int* const __restrict__ contacts, const int* const __restrict__ in, int* const infections, const int n, const int iter, int* const out){
__shared__ int neighborhood[3][BLOCK+3];
int tid = threadIdx.x;
int x = (blockIdx.x * blockDim.x) + tid;
int y = blockIdx.y;
int maxIdx = min(BLOCK, n-(blockIdx.x * blockDim.x));
int pos = y*n + x;
// Save these in registers for faster access
int maxIdxInc = maxIdx + 1;
int tidInc = tid + 1;
if(x < n && y < n){
/*
if(threadIdx.x == 0){
neighborhood[0][0] = ;
neighborhood[1][0] = ;
neighborhood[2][0] = ;
}
if(threadIdx.x == maxIdx-1){
if(maxIdx == BLOCK){
neighborhood[0][maxIdxInc] = ;
neighborhood[1][maxIdxInc] = ;
neighborhood[2][maxIdxInc] = ;
}
else{ // maxIdx is less than BLOCK (ie N == 160)
neighborhood[0][maxIdxInc] = 0;
neighborhood[1][maxIdxInc] = 0;
neighborhood[2][maxIdxInc] = 0;
}
}*/
neighborhood[0][tidInc] = blockIdx.y != 0 ? in[(y-1)*n + x] : 0;
neighborhood[1][tidInc] = in[pos];
neighborhood[2][tidInc] = blockIdx.y < n - 1 ? in[(y+1)*n + x] : 0;
}
__syncthreads();
if(x < n && y < n){
int in_pos = neighborhood[1][tidInc];
if (in_pos > 0) {
out[pos] = in_pos - 1 == 0 ? -30 : in_pos - 1;
}
if (in_pos < 0) {
out[pos] = in_pos + 1;
}
if (in_pos == 0) {
int infected = 0;
if(tid > 0){
infected += (neighborhood[0][tid] > 0) ? 1 : 0;
infected += (neighborhood[1][tid] > 0) ? 1 : 0;
infected += (neighborhood[2][tid] > 0) ? 1 : 0;
}
if(tid + 2 < maxIdxInc){
infected += (neighborhood[0][tid + 2] > 0) ? 1 : 0;
infected += (neighborhood[1][tid + 2] > 0) ? 1 : 0;
infected += (neighborhood[2][tid + 2] > 0) ? 1 : 0;
}
infected += (neighborhood[0][tid + 1] > 0) ? 1 : 0;
infected += (neighborhood[2][tid + 1] > 0) ? 1 : 0;
int limit = contacts[pos];
if (infected > limit) {
out[pos] = 10;
atomicAdd(&infections[iter], 1);
}
else{
if(tid == 0 || tid == maxIdx-1){
if(infected + 3 <= limit){
out[pos] = 0;
}
else{
if(tid == 0){
infected += x != 0 && y != 0 ? in[(y-1)*n + (x-1)] > 0 ? 1 : 0 : 0;
infected += x != 0 ? in[pos-1] > 0 ? 1 : 0 : 0;
infected += x != 0 && y < n - 1 ? in[(y+1)*n + (x-1)] > 0 ? 1 : 0 : 0;
}
else{
if(maxIdx == BLOCK){
infected += blockIdx.x < ceil((float)n/BLOCK) - 1 && blockIdx.y != 0 ? in[(y-1) * n + (x + 1)] > 0 ? 1 : 0 : 0;
infected += blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[y * n + (x+1)] > 0 ? 1 : 0 : 0;
infected += blockIdx.y < n - 1 && blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[(y+1)*n + (x+1)] > 0 ? 1 : 0 : 0;
}
}
if (infected > limit) {
out[pos] = 10;
atomicAdd(&infections[iter], 1);
}
else{
out[pos] = 0;
}
}
}
else{
out[pos] = 0;
}
}
}
}
}
void solveGPU(const int* const contacts, int* const town, int* const infections, const int n, const int iters)
{
int* in = town;
int* out;
if(hipMalloc((void**)&out, n * n * sizeof(out[0])) != hipSuccess){
fprintf(stderr, "CudaMalloc failed ...\n");
return;
}
dim3 gridSize;
dim3 blockSize;
// If N is less than block, we reduce the amount of threads per block
if(n < BLOCK){
gridSize.x = 1;
gridSize.y = n;
gridSize.z = 1;
blockSize.x = n;
blockSize.y = 1;
blockSize.z = 1;
}
else{
gridSize.x = ceil((float)n/BLOCK);
gridSize.y = n;
gridSize.z = 1;
blockSize.x = BLOCK;
blockSize.y = 1;
blockSize.z = 1;
}
printf("GridSize: %d %d \n", gridSize.x, gridSize.y);
printf("BlockSize: %d %d \n", blockSize.x, blockSize.y);
for(int i = 0; i < iters; i++){
hipLaunchKernelGGL(( GPUiter), dim3(gridSize), dim3(blockSize), 0, 0, contacts, in, infections, n, i, out);
int* tmp = in;
in = out;
out = tmp;
}
if (in != town)
{
hipMemcpy(town, in, n * n * sizeof(town[0]), hipMemcpyDeviceToDevice);
hipFree(in);
}
else
{
hipFree(out);
}
}
|
9d4705a66892cfe357f8c4146f4d716ed88e1d9e.cu
|
// write your code into this file
// your kernels can be implemented directly here, or included
// function solveGPU is a device function: it can allocate memory, call CUDA kernels etc.
#include <cstdio> // for printf/fprintf used below
#define DEBUGNUMFPU 32
#define BLOCK 128
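// One simulation step over the n x n grid (reading of the state transitions below):
// a positive cell counts down its remaining infectious days and flips to -30 (immune)
// when it reaches zero, a negative cell counts its immunity back up towards zero, and
// a susceptible cell (value 0) becomes infected (set to 10) once the number of infected
// neighbours exceeds its `contacts` limit, incrementing infections[iter].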
__global__ void GPUiter( const int* const __restrict__ contacts, const int* const __restrict__ in, int* const infections, const int n, const int iter, int* const out){
__shared__ int neighborhood[3][BLOCK+3];
int tid = threadIdx.x;
int x = (blockIdx.x * blockDim.x) + tid;
int y = blockIdx.y;
int maxIdx = min(BLOCK, n-(blockIdx.x * blockDim.x));
int pos = y*n + x;
// Save these in registers for faster access
int maxIdxInc = maxIdx + 1;
int tidInc = tid + 1;
if(x < n && y < n){
/*
if(threadIdx.x == 0){
neighborhood[0][0] = ;
neighborhood[1][0] = ;
neighborhood[2][0] = ;
}
if(threadIdx.x == maxIdx-1){
if(maxIdx == BLOCK){
neighborhood[0][maxIdxInc] = ;
neighborhood[1][maxIdxInc] = ;
neighborhood[2][maxIdxInc] = ;
}
else{ // maxIdx is less than BLOCK (ie N == 160)
neighborhood[0][maxIdxInc] = 0;
neighborhood[1][maxIdxInc] = 0;
neighborhood[2][maxIdxInc] = 0;
}
}*/
neighborhood[0][tidInc] = blockIdx.y != 0 ? in[(y-1)*n + x] : 0;
neighborhood[1][tidInc] = in[pos];
neighborhood[2][tidInc] = blockIdx.y < n - 1 ? in[(y+1)*n + x] : 0;
}
__syncthreads();
if(x < n && y < n){
int in_pos = neighborhood[1][tidInc];
if (in_pos > 0) {
out[pos] = in_pos - 1 == 0 ? -30 : in_pos - 1;
}
if (in_pos < 0) {
out[pos] = in_pos + 1;
}
if (in_pos == 0) {
int infected = 0;
if(tid > 0){
infected += (neighborhood[0][tid] > 0) ? 1 : 0;
infected += (neighborhood[1][tid] > 0) ? 1 : 0;
infected += (neighborhood[2][tid] > 0) ? 1 : 0;
}
if(tid + 2 < maxIdxInc){
infected += (neighborhood[0][tid + 2] > 0) ? 1 : 0;
infected += (neighborhood[1][tid + 2] > 0) ? 1 : 0;
infected += (neighborhood[2][tid + 2] > 0) ? 1 : 0;
}
infected += (neighborhood[0][tid + 1] > 0) ? 1 : 0;
infected += (neighborhood[2][tid + 1] > 0) ? 1 : 0;
int limit = contacts[pos];
if (infected > limit) {
out[pos] = 10;
atomicAdd(&infections[iter], 1);
}
else{
if(tid == 0 || tid == maxIdx-1){
if(infected + 3 <= limit){
out[pos] = 0;
}
else{
if(tid == 0){
infected += x != 0 && y != 0 ? in[(y-1)*n + (x-1)] > 0 ? 1 : 0 : 0;
infected += x != 0 ? in[pos-1] > 0 ? 1 : 0 : 0;
infected += x != 0 && y < n - 1 ? in[(y+1)*n + (x-1)] > 0 ? 1 : 0 : 0;
}
else{
if(maxIdx == BLOCK){
infected += blockIdx.x < ceil((float)n/BLOCK) - 1 && blockIdx.y != 0 ? in[(y-1) * n + (x + 1)] > 0 ? 1 : 0 : 0;
infected += blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[y * n + (x+1)] > 0 ? 1 : 0 : 0;
infected += blockIdx.y < n - 1 && blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[(y+1)*n + (x+1)] > 0 ? 1 : 0 : 0;
}
}
if (infected > limit) {
out[pos] = 10;
atomicAdd(&infections[iter], 1);
}
else{
out[pos] = 0;
}
}
}
else{
out[pos] = 0;
}
}
}
}
}
void solveGPU(const int* const contacts, int* const town, int* const infections, const int n, const int iters)
{
int* in = town;
int* out;
if(cudaMalloc((void**)&out, n * n * sizeof(out[0])) != cudaSuccess){
fprintf(stderr, "CudaMalloc failed ...\n");
return;
}
dim3 gridSize;
dim3 blockSize;
// If N is less than block, we reduce the amount of threads per block
if(n < BLOCK){
gridSize.x = 1;
gridSize.y = n;
gridSize.z = 1;
blockSize.x = n;
blockSize.y = 1;
blockSize.z = 1;
}
else{
gridSize.x = ceil((float)n/BLOCK);
gridSize.y = n;
gridSize.z = 1;
blockSize.x = BLOCK;
blockSize.y = 1;
blockSize.z = 1;
}
printf("GridSize: %d %d \n", gridSize.x, gridSize.y);
printf("BlockSize: %d %d \n", blockSize.x, blockSize.y);
for(int i = 0; i < iters; i++){
GPUiter<<<gridSize, blockSize>>>(contacts, in, infections, n, i, out);
int* tmp = in;
in = out;
out = tmp;
}
if (in != town)
{
cudaMemcpy(town, in, n * n * sizeof(town[0]), cudaMemcpyDeviceToDevice);
cudaFree(in);
}
else
{
cudaFree(out);
}
}
|
02c86ec5cc33ae0b8387fe3354ef3391be505668.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2011 Kerem KAT
//
// http://dissipatedheat.com/
// Do not hesitate to contact me about usage of the code or to make comments
// about the code. Your feedback will be appreciated.
// keremkat<@>gmail<.>com
//
// Kodun kullanımı hakkında veya yorum yapmak için benimle iletişim kurmaktan
// çekinmeyiniz. Geri bildirimleriniz değerlendirilecektir.
// keremkat<@>gmail<.>com
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "invert.h"
/**
\file invert.cu
	Defines the CUDA invert kernel and its launcher method.
*/
/** Block size in the kernel grid ( BLOCK_SIZE x BLOCK_SIZE square blocks ). */
#define BLOCK_SIZE (32)
/** Set to 1 to measure GPU time. */
#define ENABLE_TIMING_CODE 0
/**
	Kernel that inverts the image.
	\param image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
	\param width Width of the image in pixels
	\param height Height of the image in pixels
	The method runs on the GPU and writes its output over the image parameter.
*/
__global__
void gpuInvert(
float* image,
int width,
int height
)
{
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	int cIdx = ( row * width + col ) * 3; // multiplied by 3 for RGB, linearIndex.
	// subtracting the normalized pixels from 1 gives the negative of the image.
	*( image + cIdx ) = 1 - *( image + cIdx ); // Blue channel
	*( image + cIdx + 1 ) = 1 - *( image + cIdx + 1 ); // Green channel
	*( image + cIdx + 2 ) = 1 - *( image + cIdx + 2 ); // Red channel
}
/**
	Method of type \ref ptKernelLauncher.
	\param d_Image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
	\param width Width of the image in pixels
	\param height Height of the image in pixels
	Method that launches the \ref gpuInvert kernel after setting up the Grid and Block dimensions.
*/
void deviceInvertLaunch(
float *d_Image,
int width,
int height
)
{
// launch kernel
dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE );
dim3 dimGrid( width / dimBlock.x, height / dimBlock.y );
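	// note: the integer division assumes width and height are multiples of BLOCK_SIZE;
	// any remaining right/bottom pixels would not be covered by this grid.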
#if ENABLE_TIMING_CODE
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipLaunchKernelGGL(( gpuInvert), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Image, width, height);
#if ENABLE_TIMING_CODE
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// block until the device has completed
hipDeviceSynchronize();
printf("gpuInvert kernel time: %.3f ms\n", elapsedTime);
#endif
hipDeviceSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
}
|
02c86ec5cc33ae0b8387fe3354ef3391be505668.cu
|
// Copyright (c) 2011 Kerem KAT
//
// http://dissipatedheat.com/
// Do not hesitate to contact me about usage of the code or to make comments
// about the code. Your feedback will be appreciated.
// keremkat<@>gmail<.>com
//
// Kodun kullanımı hakkında veya yorum yapmak için benimle iletişim kurmaktan
// çekinmeyiniz. Geri bildirimleriniz değerlendirilecektir.
// keremkat<@>gmail<.>com
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "invert.h"
/**
\file invert.cu
	Defines the CUDA invert kernel and its launcher method.
*/
/** Block size in the kernel grid ( BLOCK_SIZE x BLOCK_SIZE square blocks ). */
#define BLOCK_SIZE (32)
/** Set to 1 to measure GPU time. */
#define ENABLE_TIMING_CODE 0
/**
	Kernel that inverts the image.
	\param image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
	\param width Width of the image in pixels
	\param height Height of the image in pixels
	The method runs on the GPU and writes its output over the image parameter.
*/
__global__
void gpuInvert(
float* image,
int width,
int height
)
{
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	int cIdx = ( row * width + col ) * 3; // multiplied by 3 for RGB, linearIndex.
	// subtracting the normalized pixels from 1 gives the negative of the image.
	*( image + cIdx ) = 1 - *( image + cIdx ); // Blue channel
	*( image + cIdx + 1 ) = 1 - *( image + cIdx + 1 ); // Green channel
	*( image + cIdx + 2 ) = 1 - *( image + cIdx + 2 ); // Red channel
}
/**
	Method of type \ref ptKernelLauncher.
	\param d_Image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
	\param width Width of the image in pixels
	\param height Height of the image in pixels
	Method that launches the \ref gpuInvert kernel after setting up the Grid and Block dimensions.
*/
void deviceInvertLaunch(
float *d_Image,
int width,
int height
)
{
// launch kernel
dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE );
dim3 dimGrid( width / dimBlock.x, height / dimBlock.y );
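	// note: the integer division assumes width and height are multiples of BLOCK_SIZE;
	// any remaining right/bottom pixels would not be covered by this grid.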
#if ENABLE_TIMING_CODE
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
gpuInvert<<< dimGrid, dimBlock >>>( d_Image, width, height);
#if ENABLE_TIMING_CODE
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// block until the device has completed
cudaThreadSynchronize();
printf("gpuInvert kernel time: %.3f ms\n", elapsedTime);
#endif
cudaThreadSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
}
|
a2bd4a5d363eead7353136dc3e95bbca8a849066.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int *r_p,
int *radix,
int *lp,
int right,
int left
)
{
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
//insert partition left table in shared memory
__shared__ TUPLE sub_lt[B_ROW_NUM];
for(int i=lp[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<lp[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
int temp=0;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = lp[blockIdx.x+1] - lp[blockIdx.x];
int count_x_temp = 0;
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp){
count_x_temp++;
}
}
}
count[x] = count_x_temp;
if(x == left-1){
count[x+1] = 0;
}
}
__global__ void join(
TUPLE *lt,
TUPLE *rt,
RESULT *jt,
int *count,
int *r_p,
int *radix,
int *lp,
int right,
int left
)
{
//int x = blockIdx.x*blockDim.x + threadIdx.x;
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
__shared__ TUPLE sub_lt[B_ROW_NUM];
for(int i=lp[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<lp[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
/*
int x_limit = 0;
if(r_p[radix[blockIdx.x]+1] - r_p[radix[blockIdx.x]]%GRID_SIZE_Y == 0){
x_limit = (r_p[radix[blockIdx.x]+1] - r_p[radix[blockIdx.x]])/GRID_SIZE_Y;
}else{
x_limit = (r_p[radix[blockIdx.x]+1] - r_p[radix[blockIdx.x]])/GRID_SIZE_Y + 1;
}
*/
TUPLE temp;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = lp[blockIdx.x+1] - lp[blockIdx.x];
int tcount=count[x];
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp.key = rt[k].key;
temp.val = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp.val){
jt[tcount].rkey = temp.key;
jt[tcount].rval = temp.val;
jt[tcount].lkey = sub_lt[i].key;
jt[tcount].lval = sub_lt[i].val;
/*
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
*/
tcount++;
}
}
}
}
}
|
a2bd4a5d363eead7353136dc3e95bbca8a849066.cu
|
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int *r_p,
int *radix,
int *lp,
int right,
int left
)
{
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
//insert partition left table in shared memory
__shared__ TUPLE sub_lt[B_ROW_NUM];
for(int i=lp[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<lp[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
int temp=0;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = lp[blockIdx.x+1] - lp[blockIdx.x];
int count_x_temp = 0;
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp){
count_x_temp++;
}
}
}
count[x] = count_x_temp;
if(x == left-1){
count[x+1] = 0;
}
}
__global__ void join(
TUPLE *lt,
TUPLE *rt,
RESULT *jt,
int *count,
int *r_p,
int *radix,
int *lp,
int right,
int left
)
{
//int x = blockIdx.x*blockDim.x + threadIdx.x;
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
__shared__ TUPLE sub_lt[B_ROW_NUM];
for(int i=lp[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<lp[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
/*
int x_limit = 0;
if(r_p[radix[blockIdx.x]+1] - r_p[radix[blockIdx.x]]%GRID_SIZE_Y == 0){
x_limit = (r_p[radix[blockIdx.x]+1] - r_p[radix[blockIdx.x]])/GRID_SIZE_Y;
}else{
x_limit = (r_p[radix[blockIdx.x]+1] - r_p[radix[blockIdx.x]])/GRID_SIZE_Y + 1;
}
*/
TUPLE temp;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = lp[blockIdx.x+1] - lp[blockIdx.x];
int tcount=count[x];
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp.key = rt[k].key;
temp.val = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp.val){
jt[tcount].rkey = temp.key;
jt[tcount].rval = temp.val;
jt[tcount].lkey = sub_lt[i].key;
jt[tcount].lval = sub_lt[i].val;
/*
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
*/
tcount++;
}
}
}
}
}
|
5d2eb50e74ed3ff7c43caae675245455f195ea89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "cpu_bitmap.h"
#include "bitmap_help.h"
#define CHANNELS 4
// forward declarations: the Rotate kernel below calls these device helpers,
// which are defined after it in this file
__device__ float readPixVal( float* ImgSrc,int ImgWidth,int x,int y);
__device__ void putPixVal( float* ImgSrc,int ImgWidth,int x,int y, float floatVal);
__global__ void Rotate(float* Source, float* Destination, int sizeX, int sizeY, float deg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;// Kernel definition
int j = blockIdx.y * blockDim.y + threadIdx.y;
int xc = sizeX - sizeX/2;
int yc = sizeY - sizeY/2;
int newx = ((float)i-xc)*cos(deg) - ((float)j-yc)*sin(deg) + xc;
int newy = ((float)i-xc)*sin(deg) + ((float)j-yc)*cos(deg) + yc;
if (newx >= 0 && newx < sizeX && newy >= 0 && newy < sizeY)
{
putPixVal(Destination, sizeX, i , j, readPixVal(Source, sizeX, newx, newy));
}
}
__device__ float readPixVal( float* ImgSrc,int ImgWidth,int x,int y)
{
return (float)ImgSrc[y*ImgWidth+x];
}
__device__ void putPixVal( float* ImgSrc,int ImgWidth,int x,int y, float floatVal)
{
ImgSrc[y*ImgWidth+x] = floatVal;
}
__host__ void imgProc(unsigned char * map, int size, int width, int height) {
	unsigned char* device_src;
	unsigned char* device_dst;
	size_t imageSize= size;
	float deg = 90.0f * 3.14159265f / 180.0f; // rotation angle, converted to radians for sin/cos
	hipMalloc((void**)&device_src,imageSize);
	hipMalloc((void**)&device_dst,imageSize);
	hipMemcpy(device_src,map,imageSize,hipMemcpyHostToDevice);
	hipMemset(device_dst,0,imageSize); // pixels that map outside the source stay black
	dim3 gridSize(width,height);
	dim3 blockSize(1,1,1);
	// both buffers live on the device; each 4-byte RGBA pixel is moved as one float value
	hipLaunchKernelGGL(( Rotate), dim3(gridSize),dim3(blockSize), 0, 0, (float*)device_src,(float*)device_dst, width, height,deg);
	hipDeviceSynchronize();
	hipMemcpy(map,device_dst,imageSize,hipMemcpyDeviceToHost);
	hipFree(device_src);
	hipFree(device_dst);
	return;
}
int main(void) {
char fname[50];
FILE* infile;
unsigned short ftype;
tagBMFH bitHead;
tagBMIH bitInfoHead;
tagRGBQ *pRgb;
printf("Please enter the .bmp file name: ");
scanf("%s", fname);
strcat(fname,".bmp");
infile = fopen(fname, "rb");
if (infile != NULL) {
printf("File open successful.\n");
fread(&ftype, 1, sizeof(unsigned short), infile);
if (ftype != 0x4d42)
{
printf("File not .bmp format.\n");
return 1;
}
fread(&bitHead, 1, sizeof(tagBMFH), infile);
fread(&bitInfoHead, 1, sizeof(tagBMIH), infile);
}
else {
printf("File open fail.\n");
return 1;
}
if (bitInfoHead.biBitCount < 24) {
long nPlateNum = long(pow(2, double(bitInfoHead.biBitCount)));
pRgb = (tagRGBQ *)malloc(nPlateNum * sizeof(tagRGBQ));
memset(pRgb, 0, nPlateNum * sizeof(tagRGBQ));
int num = fread(pRgb, 4, nPlateNum, infile);
}
int width = bitInfoHead.biWidth;
int height = bitInfoHead.biHeight;
int l_width = 4 * ((width * bitInfoHead.biBitCount + 31) / 32);
long nData = height * l_width;
unsigned char *pColorData = (unsigned char *)malloc(nData);
memset(pColorData, 0, nData);
fread(pColorData, 1, nData, infile);
fclose(infile);
CPUBitmap dataOfBmp(width, height);
unsigned char *map = dataOfBmp.get_ptr();
if (bitInfoHead.biBitCount < 24) {
int k, index = 0;
if (bitInfoHead.biBitCount == 1) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 8;
mixIndex = pColorData[k];
if (j % 8 < 7) mixIndex = mixIndex << (7 - (j % 8));
mixIndex = mixIndex >> 7;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 2) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 4;
mixIndex = pColorData[k];
if (j % 4 < 3) mixIndex = mixIndex << (6 - 2 * (j % 4));
mixIndex = mixIndex >> 6;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 4) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 2;
mixIndex = pColorData[k];
if (j % 2 == 0) mixIndex = mixIndex << 4;
mixIndex = mixIndex >> 4;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 8) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j;
mixIndex = pColorData[k];
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 16) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
					unsigned short mixIndex = 0;
					k = i * l_width + j * 2;
					unsigned short shortTemp = (unsigned short)(pColorData[k + 1]) << 8;
					mixIndex = pColorData[k] + shortTemp;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
}
else {
int k, index = 0;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
k = i * l_width + j * 3;
map[index * 4 + 0] = pColorData[k + 2];
map[index * 4 + 1] = pColorData[k + 1];
map[index * 4 + 2] = pColorData[k];
index++;
}
}
imgProc(map, dataOfBmp.image_size(), width, height);
dataOfBmp.display_and_exit();
return 0;
}
|
5d2eb50e74ed3ff7c43caae675245455f195ea89.cu
|
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "cpu_bitmap.h"
#include "bitmap_help.h"
#define CHANNELS 4
// forward declarations: the Rotate kernel below calls these device helpers,
// which are defined after it in this file
__device__ float readPixVal( float* ImgSrc,int ImgWidth,int x,int y);
__device__ void putPixVal( float* ImgSrc,int ImgWidth,int x,int y, float floatVal);
__global__ void Rotate(float* Source, float* Destination, int sizeX, int sizeY, float deg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;// Kernel definition
int j = blockIdx.y * blockDim.y + threadIdx.y;
int xc = sizeX - sizeX/2;
int yc = sizeY - sizeY/2;
int newx = ((float)i-xc)*cos(deg) - ((float)j-yc)*sin(deg) + xc;
int newy = ((float)i-xc)*sin(deg) + ((float)j-yc)*cos(deg) + yc;
if (newx >= 0 && newx < sizeX && newy >= 0 && newy < sizeY)
{
putPixVal(Destination, sizeX, i , j, readPixVal(Source, sizeX, newx, newy));
}
}
__device__ float readPixVal( float* ImgSrc,int ImgWidth,int x,int y)
{
return (float)ImgSrc[y*ImgWidth+x];
}
__device__ void putPixVal( float* ImgSrc,int ImgWidth,int x,int y, float floatVal)
{
ImgSrc[y*ImgWidth+x] = floatVal;
}
__host__ void imgProc(unsigned char * map, int size, int width, int height) {
	unsigned char* device_src;
	unsigned char* device_dst;
	size_t imageSize= size;
	float deg = 90.0f * 3.14159265f / 180.0f; // rotation angle, converted to radians for sin/cos
	cudaMalloc((void**)&device_src,imageSize);
	cudaMalloc((void**)&device_dst,imageSize);
	cudaMemcpy(device_src,map,imageSize,cudaMemcpyHostToDevice);
	cudaMemset(device_dst,0,imageSize); // pixels that map outside the source stay black
	dim3 gridSize(width,height);
	dim3 blockSize(1,1,1);
	// both buffers live on the device; each 4-byte RGBA pixel is moved as one float value
	Rotate<<<gridSize,blockSize>>>((float*)device_src,(float*)device_dst, width, height,deg);
	cudaDeviceSynchronize();
	cudaMemcpy(map,device_dst,imageSize,cudaMemcpyDeviceToHost);
	cudaFree(device_src);
	cudaFree(device_dst);
	return;
}
int main(void) {
char fname[50];
FILE* infile;
unsigned short ftype;
tagBMFH bitHead;
tagBMIH bitInfoHead;
tagRGBQ *pRgb;
printf("Please enter the .bmp file name: ");
scanf("%s", fname);
strcat(fname,".bmp");
infile = fopen(fname, "rb");
if (infile != NULL) {
printf("File open successful.\n");
fread(&ftype, 1, sizeof(unsigned short), infile);
if (ftype != 0x4d42)
{
printf("File not .bmp format.\n");
return 1;
}
fread(&bitHead, 1, sizeof(tagBMFH), infile);
fread(&bitInfoHead, 1, sizeof(tagBMIH), infile);
}
else {
printf("File open fail.\n");
return 1;
}
if (bitInfoHead.biBitCount < 24) {
long nPlateNum = long(pow(2, double(bitInfoHead.biBitCount)));
pRgb = (tagRGBQ *)malloc(nPlateNum * sizeof(tagRGBQ));
memset(pRgb, 0, nPlateNum * sizeof(tagRGBQ));
int num = fread(pRgb, 4, nPlateNum, infile);
}
int width = bitInfoHead.biWidth;
int height = bitInfoHead.biHeight;
int l_width = 4 * ((width * bitInfoHead.biBitCount + 31) / 32);
long nData = height * l_width;
unsigned char *pColorData = (unsigned char *)malloc(nData);
memset(pColorData, 0, nData);
fread(pColorData, 1, nData, infile);
fclose(infile);
CPUBitmap dataOfBmp(width, height);
unsigned char *map = dataOfBmp.get_ptr();
if (bitInfoHead.biBitCount < 24) {
int k, index = 0;
if (bitInfoHead.biBitCount == 1) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 8;
mixIndex = pColorData[k];
if (j % 8 < 7) mixIndex = mixIndex << (7 - (j % 8));
mixIndex = mixIndex >> 7;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 2) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 4;
mixIndex = pColorData[k];
if (j % 4 < 3) mixIndex = mixIndex << (6 - 2 * (j % 4));
mixIndex = mixIndex >> 6;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 4) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 2;
mixIndex = pColorData[k];
if (j % 2 == 0) mixIndex = mixIndex << 4;
mixIndex = mixIndex >> 4;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 8) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j;
mixIndex = pColorData[k];
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 16) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
					unsigned short mixIndex = 0;
					k = i * l_width + j * 2;
					unsigned short shortTemp = (unsigned short)(pColorData[k + 1]) << 8;
					mixIndex = pColorData[k] + shortTemp;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
}
else {
int k, index = 0;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
k = i * l_width + j * 3;
map[index * 4 + 0] = pColorData[k + 2];
map[index * 4 + 1] = pColorData[k + 1];
map[index * 4 + 2] = pColorData[k];
index++;
}
}
imgProc(map, dataOfBmp.image_size(), width, height);
dataOfBmp.display_and_exit();
return 0;
}
|
abe011c3173fa81330c74525a95a13d379185279.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdiocu.h>
#include <stringcu.h>
#include <sentinel.h>
#include <assert.h>
enum {
MODULE_SIMPLE = 500,
MODULE_STRING,
};
struct module_simple {
sentinelMessage Base;
int Value;
__device__ module_simple(bool wait, int value) : Base(wait, MODULE_SIMPLE), Value(value) { sentinelDeviceSend(&Base, sizeof(module_simple)); }
int RC;
};
struct module_string {
static __forceinline__ __device__ char *Prepare(module_string *t, char *data, char *dataEnd, intptr_t offset) {
int strLength = (t->Str ? (int)strlen(t->Str) + 1 : 0);
char *str = (char *)(data += ROUND8_(sizeof(*t)));
char *end = (char *)(data += strLength);
if (end > dataEnd) return nullptr;
memcpy(str, t->Str, strLength);
t->Str = str + offset;
return end;
}
sentinelMessage Base;
const char *Str;
__device__ module_string(bool wait, const char *str) : Base(wait, MODULE_STRING, 1024, SENTINELPREPARE(Prepare)), Str(str) { sentinelDeviceSend(&Base, sizeof(module_string)); }
int RC;
};
bool sentinelModuleExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*, char*, char*, intptr_t)) {
switch (data->OP) {
case MODULE_SIMPLE: { module_simple *msg = (module_simple *)data; msg->RC = msg->Value; return true; }
case MODULE_STRING: { module_string *msg = (module_string *)data; msg->RC = (int)strlen(msg->Str); return true; }
}
return false;
}
static sentinelExecutor _moduleExecutor = { nullptr, "module", sentinelModuleExecutor, nullptr };
static __global__ void g_sentinel_test1() {
printf("sentinel_test1\n");
//// SENTINELDEVICESEND ////
// extern __device__ void sentinelDeviceSend(sentinelMessage *msg, int msgLength);
module_simple a0(true, 1);
int a0a = a0.RC;
assert(a0a == 1);
module_string a1(true, "test");
int a1a = a1.RC;
assert(a1a == 4);
}
hipError_t sentinel_test1() {
sentinelRegisterExecutor(&_moduleExecutor);
hipLaunchKernelGGL(( g_sentinel_test1), dim3(1), dim3(1), 0, 0, ); return hipDeviceSynchronize();
}
//// SENTINELDEFAULTEXECUTOR ////
// extern bool sentinelDefaultExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*,char*,char*,intptr_t));
//// SENTINELSERVERINITIALIZE, SENTINELSERVERSHUTDOWN ////
// extern void sentinelServerInitialize(sentinelExecutor *executor = nullptr, char *mapHostName = SENTINEL_NAME, bool hostSentinel = true, bool deviceSentinel = true);
// extern void sentinelServerShutdown();
//// SENTINELDEVICESEND ////
// extern __device__ void sentinelDeviceSend(sentinelMessage *msg, int msgLength);
//// SENTINELCLIENTINITIALIZE, SENTINELCLIENTSHUTDOWN ////
// extern void sentinelClientInitialize(char *mapHostName = SENTINEL_NAME);
// extern void sentinelClientShutdown();
//// SENTINELCLIENTSEND ////
// extern void sentinelClientSend(sentinelMessage *msg, int msgLength);
//// SENTINELFINDEXECUTOR, SENTINELREGISTEREXECUTOR, SENTINELUNREGISTEREXECUTOR ////
// extern sentinelExecutor *sentinelFindExecutor(const char *name, bool forDevice = true);
// extern void sentinelRegisterExecutor(sentinelExecutor *exec, bool makeDefault = false, bool forDevice = true);
// extern void sentinelUnregisterExecutor(sentinelExecutor *exec, bool forDevice = true);
//// SENTINELREGISTERFILEUTILS ////
// extern void sentinelRegisterFileUtils();
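// A minimal host-side usage sketch (not part of the original file; it only wires
// together the declarations listed above and sentinel_test1() defined in this file,
// assuming the default arguments shown in the comments):
//
//   int main() {
//     sentinelServerInitialize();        // start host/device sentinel
//     hipError_t rc = sentinel_test1();  // registers _moduleExecutor, runs g_sentinel_test1
//     sentinelServerShutdown();
//     return rc == hipSuccess ? 0 : 1;
//   }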
|
abe011c3173fa81330c74525a95a13d379185279.cu
|
#include <stdiocu.h>
#include <stringcu.h>
#include <sentinel.h>
#include <assert.h>
enum {
MODULE_SIMPLE = 500,
MODULE_STRING,
};
struct module_simple {
sentinelMessage Base;
int Value;
__device__ module_simple(bool wait, int value) : Base(wait, MODULE_SIMPLE), Value(value) { sentinelDeviceSend(&Base, sizeof(module_simple)); }
int RC;
};
struct module_string {
static __forceinline__ __device__ char *Prepare(module_string *t, char *data, char *dataEnd, intptr_t offset) {
int strLength = (t->Str ? (int)strlen(t->Str) + 1 : 0);
char *str = (char *)(data += ROUND8_(sizeof(*t)));
char *end = (char *)(data += strLength);
if (end > dataEnd) return nullptr;
memcpy(str, t->Str, strLength);
t->Str = str + offset;
return end;
}
sentinelMessage Base;
const char *Str;
__device__ module_string(bool wait, const char *str) : Base(wait, MODULE_STRING, 1024, SENTINELPREPARE(Prepare)), Str(str) { sentinelDeviceSend(&Base, sizeof(module_string)); }
int RC;
};
bool sentinelModuleExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*, char*, char*, intptr_t)) {
switch (data->OP) {
case MODULE_SIMPLE: { module_simple *msg = (module_simple *)data; msg->RC = msg->Value; return true; }
case MODULE_STRING: { module_string *msg = (module_string *)data; msg->RC = (int)strlen(msg->Str); return true; }
}
return false;
}
static sentinelExecutor _moduleExecutor = { nullptr, "module", sentinelModuleExecutor, nullptr };
static __global__ void g_sentinel_test1() {
printf("sentinel_test1\n");
//// SENTINELDEVICESEND ////
// extern __device__ void sentinelDeviceSend(sentinelMessage *msg, int msgLength);
module_simple a0(true, 1);
int a0a = a0.RC;
assert(a0a == 1);
module_string a1(true, "test");
int a1a = a1.RC;
assert(a1a == 4);
}
cudaError_t sentinel_test1() {
sentinelRegisterExecutor(&_moduleExecutor);
g_sentinel_test1<<<1, 1>>>(); return cudaDeviceSynchronize();
}
//// SENTINELDEFAULTEXECUTOR ////
// extern bool sentinelDefaultExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*,char*,char*,intptr_t));
//// SENTINELSERVERINITIALIZE, SENTINELSERVERSHUTDOWN ////
// extern void sentinelServerInitialize(sentinelExecutor *executor = nullptr, char *mapHostName = SENTINEL_NAME, bool hostSentinel = true, bool deviceSentinel = true);
// extern void sentinelServerShutdown();
//// SENTINELDEVICESEND ////
// extern __device__ void sentinelDeviceSend(sentinelMessage *msg, int msgLength);
//// SENTINELCLIENTINITIALIZE, SENTINELCLIENTSHUTDOWN ////
// extern void sentinelClientInitialize(char *mapHostName = SENTINEL_NAME);
// extern void sentinelClientShutdown();
//// SENTINELCLIENTSEND ////
// extern void sentinelClientSend(sentinelMessage *msg, int msgLength);
//// SENTINELFINDEXECUTOR, SENTINELREGISTEREXECUTOR, SENTINELUNREGISTEREXECUTOR ////
// extern sentinelExecutor *sentinelFindExecutor(const char *name, bool forDevice = true);
// extern void sentinelRegisterExecutor(sentinelExecutor *exec, bool makeDefault = false, bool forDevice = true);
// extern void sentinelUnregisterExecutor(sentinelExecutor *exec, bool forDevice = true);
//// SENTINELREGISTERFILEUTILS ////
// extern void sentinelRegisterFileUtils();
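// A minimal host-side usage sketch (not part of the original file; it only wires
// together the declarations listed above and sentinel_test1() defined in this file,
// assuming the default arguments shown in the comments):
//
//   int main() {
//     sentinelServerInitialize();         // start host/device sentinel
//     cudaError_t rc = sentinel_test1();  // registers _moduleExecutor, runs g_sentinel_test1
//     sentinelServerShutdown();
//     return rc == cudaSuccess ? 0 : 1;
//   }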
|
c190998f83df10796b97897e648a85f051b93abd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <sys/stat.h>
#include <display.h>
#include <pthread.h>
#include <math.h>
#include <operators.h>
#include <image.h>
#include <sobel.h>
#include <hamilton.h>
#include <malvar.h>
#include <gunturk.h>
#include <pronk.h>
#include <view.h>
#include <gpu.h>
#ifndef TITLE
#define TITLE "CUDA DEBAYER DEMO"
#endif
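// Per-pixel CIELAB adjustment: rotates the (a,b) chroma plane by `angle`, scales
// lightness by `bri` and chroma by `sat`, then adds the offsets `ofs`, `da` and `db`.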
__global__
void f_cielab_enhance(float3* lab, size_t pitch_in, size_t width, size_t height, float angle, float sat, float bri, float ofs, float da, float db)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= width || y >= height) return;
auto px = View2DSym<float3>(lab, pitch_in, x, y, width, height);
	// keep the original (a,b) values for both rows of the rotation, otherwise the
	// second line would read the already-rotated a component
	float a = px(0,0).y, b = px(0,0).z;
	px(0,0).y = cos(angle) * a + sin(angle) * b;
	px(0,0).z = -sin(angle) * a + cos(angle) * b;
px(0,0).x *= bri;
px(0,0).x += ofs;
px(0,0).y *= sat;
px(0,0).z *= sat;
px(0,0).y += da;
px(0,0).z += db;
}
int main(int /*argc*/, char** /*argv*/)
{
int rc;
hipStream_t stream = 0;
try
{
printf("Selecting the best GPU\n");
selectGPU();
hipDeviceSetLimit(hipLimitMallocHeapSize, 128*1024*1024);
rc = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
if (hipSuccess != rc) throw "Unable to create CUDA stream";
auto original = Image::load("kodak.ppm");
original->copyToDevice(stream);
original->printInfo();
// Debayer source and destination images
constexpr size_t debayer_count = 6;
Image* debayer[debayer_count] = {0};
for (int i=0; i<debayer_count; i++)
{
debayer[i] = Image::create(Image::Type::ppm, original->width, original->height);
}
// NEAREST NEIGHBOR
DebayerFilter debayerNN;
Image* bayer = DebayerFilter::pack(original, stream);
Image* bayer_colored = DebayerFilter::unpack(bayer,stream);
debayerNN.source = bayer;
debayerNN.destination = debayer[0];
debayerNN.run(stream);
debayer[0]->copyToHost(stream);
// BILINEAR
BilinearDebayerFilter bilinear;
bilinear.source = bayer;
bilinear.destination = debayer[1];
bilinear.run(stream);
debayer[1]->copyToHost(stream);
// MALVAR
MalvarFilter malvar;
malvar.source = bayer;
malvar.destination = debayer[2];
malvar.run(stream);
debayer[2]->copyToHost(stream);
// HAMILTON ADAMS
HamiltonFilter hamilton;
hamilton.source = bayer;
hamilton.destination = debayer[3];
hamilton.run(stream);
debayer[3]->copyToHost(stream);
// GUNTURK
GunturkFilter gunturk;
gunturk.source = bayer;
gunturk.destination = debayer[4];
gunturk.run(stream);
debayer[4]->copyToHost(stream);
// My own tests
PronkFilter pronk;
pronk.source = bayer;
pronk.destination = debayer[5];
pronk.run(stream);
debayer[5]->copyToHost(stream);
// Print statistics
hipDeviceSynchronize();
printf("PSNR\n");
for (size_t i=0; i<debayer_count; i++)
{
printf("- %d: %0.02f\n", i, debayer[i]->psnr(original));
}
// SETUP DISPLAY
CudaDisplay disp(TITLE, original->width, original->height);
disp.cudaMap(stream);
int i = 0;
int count = 10;
int scale = 1;
int dx = 0, dy = 0;
float ofs = 0;
float angle = 0.00;
float sat= 1, bri=1;
float da=0, db=0;
bool showEnhanced = true;
auto black = Image::create(Image::Type::ppm, original->width, original->height);
auto mask = Image::create(Image::Type::lab, original->width, original->height);
auto lab1 = Image::create(Image::Type::lab, original->width, original->height);
auto lab2 = Image::create(Image::Type::lab, original->width, original->height);
auto enhanced = Image::create(Image::Type::ppm, original->width, original->height);
Image* images[] = { original, bayer, bayer_colored,
debayer[0], debayer[1], debayer[2], debayer[3], debayer[4], debayer[5], lab1 };
while (true)
{
Image* img = images[i % count];
if (img->type == Image::Type::ppm && showEnhanced)
{
img->toLab(lab1, stream);
//black->toLab(lab2, stream);
//conv(mask, lab, zipper, stream);
//cmux(mask, mux,stream);
//blend(lab, mask, lab2, lab, stream);
dim3 blockSize = { 16, 16 };
dim3 gridSize = {
((int)original->width + blockSize.x - 1) / blockSize.x,
((int)original->height + blockSize.y - 1) / blockSize.y };
hipLaunchKernelGGL(( f_cielab_enhance) , dim3(gridSize), dim3(blockSize), 0, stream ,
(float3*)lab1->mem.device.data, lab1->mem.device.pitch,
lab1->width, lab1->height, angle, sat, bri, ofs, da, db);
enhanced->fromLab(lab1, stream);
img = enhanced;
}
display(&disp, img, scale, dx, dy, 0, stream);
hipStreamSynchronize(stream);
disp.cudaFinish(stream);
disp.render(stream);
rc = hipGetLastError();
if (hipSuccess != rc) throw "CUDA ERROR";
if (int e = disp.events())
{
if (e < 0)
{
disp.cudaUnmap(stream);
hipStreamDestroy(stream);
return 0;
}
else switch (e)
{
case ',': i--; if (i < 0) i=count-1; break;
case '.': i++; if (i >= count) i=0; break;
case '-': scale--; if (scale <= 0) scale = 1; break;
case '=': scale++; if (scale >= 32) scale = 32; break;
case 'w': dy-=10; break;
case 's': dy+=10; break;
case 'a': dx-=10; break;
case 'd': dx+=10; break;
case '[': bri *= 1.1f; break;
case ']': bri /= 1.1f; break;
case ';': sat *= 1.1f; break;
case '\'':sat /= 1.1f; break;
case 'n': ofs += 5;break;
case 'm': ofs -= 5;break;
case 'h': da += 3;break;
case 'j': da -= 3;break;
case 'y': db += 3;break;
case 'u': db -= 5;break;
case 'c': pronk.sharpness *= 1.1; pronk.run(stream); break;
case 'v': pronk.sharpness /= 1.1; pronk.run(stream); break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9': i = e - '0'; break;
case 'q':
bri = 1, sat = 1,
da = 0, db = 0, ofs = 0;
break;
case 'r':
dx = 0, dy = 0, scale = 1;
default: break;
}
}
usleep(100000);
}
}
catch (const char* &ex)
{
fprintf(stderr, "ERROR: %s\n", ex);
fflush(stderr);
return 1;
}
return 0;
}
|
c190998f83df10796b97897e648a85f051b93abd.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <sys/stat.h>
#include <display.h>
#include <pthread.h>
#include <math.h>
#include <operators.h>
#include <image.h>
#include <sobel.h>
#include <hamilton.h>
#include <malvar.h>
#include <gunturk.h>
#include <pronk.h>
#include <view.h>
#include <gpu.h>
#ifndef TITLE
#define TITLE "CUDA DEBAYER DEMO"
#endif
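// Per-pixel CIELAB adjustment: rotates the (a,b) chroma plane by `angle`, scales
// lightness by `bri` and chroma by `sat`, then adds the offsets `ofs`, `da` and `db`.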
__global__
void f_cielab_enhance(float3* lab, size_t pitch_in, size_t width, size_t height, float angle, float sat, float bri, float ofs, float da, float db)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= width || y >= height) return;
auto px = View2DSym<float3>(lab, pitch_in, x, y, width, height);
	// keep the original (a,b) values for both rows of the rotation, otherwise the
	// second line would read the already-rotated a component
	float a = px(0,0).y, b = px(0,0).z;
	px(0,0).y = cos(angle) * a + sin(angle) * b;
	px(0,0).z = -sin(angle) * a + cos(angle) * b;
px(0,0).x *= bri;
px(0,0).x += ofs;
px(0,0).y *= sat;
px(0,0).z *= sat;
px(0,0).y += da;
px(0,0).z += db;
}
int main(int /*argc*/, char** /*argv*/)
{
int rc;
cudaStream_t stream = 0;
try
{
printf("Selecting the best GPU\n");
selectGPU();
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
rc = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
if (cudaSuccess != rc) throw "Unable to create CUDA stream";
auto original = Image::load("kodak.ppm");
original->copyToDevice(stream);
original->printInfo();
// Debayer source and destination images
constexpr size_t debayer_count = 6;
Image* debayer[debayer_count] = {0};
for (int i=0; i<debayer_count; i++)
{
debayer[i] = Image::create(Image::Type::ppm, original->width, original->height);
}
// NEAREST NEIGHBOR
DebayerFilter debayerNN;
Image* bayer = DebayerFilter::pack(original, stream);
Image* bayer_colored = DebayerFilter::unpack(bayer,stream);
debayerNN.source = bayer;
debayerNN.destination = debayer[0];
debayerNN.run(stream);
debayer[0]->copyToHost(stream);
// BILINEAR
BilinearDebayerFilter bilinear;
bilinear.source = bayer;
bilinear.destination = debayer[1];
bilinear.run(stream);
debayer[1]->copyToHost(stream);
// MALVAR
MalvarFilter malvar;
malvar.source = bayer;
malvar.destination = debayer[2];
malvar.run(stream);
debayer[2]->copyToHost(stream);
// HAMILTON ADAMS
HamiltonFilter hamilton;
hamilton.source = bayer;
hamilton.destination = debayer[3];
hamilton.run(stream);
debayer[3]->copyToHost(stream);
// GUNTURK
GunturkFilter gunturk;
gunturk.source = bayer;
gunturk.destination = debayer[4];
gunturk.run(stream);
debayer[4]->copyToHost(stream);
// My own tests
PronkFilter pronk;
pronk.source = bayer;
pronk.destination = debayer[5];
pronk.run(stream);
debayer[5]->copyToHost(stream);
// Print statistics
cudaDeviceSynchronize();
printf("PSNR\n");
for (size_t i=0; i<debayer_count; i++)
{
printf("- %d: %0.02f\n", i, debayer[i]->psnr(original));
}
// SETUP DISPLAY
CudaDisplay disp(TITLE, original->width, original->height);
disp.cudaMap(stream);
int i = 0;
int count = 10;
int scale = 1;
int dx = 0, dy = 0;
float ofs = 0;
float angle = 0.00;
float sat= 1, bri=1;
float da=0, db=0;
bool showEnhanced = true;
auto black = Image::create(Image::Type::ppm, original->width, original->height);
auto mask = Image::create(Image::Type::lab, original->width, original->height);
auto lab1 = Image::create(Image::Type::lab, original->width, original->height);
auto lab2 = Image::create(Image::Type::lab, original->width, original->height);
auto enhanced = Image::create(Image::Type::ppm, original->width, original->height);
Image* images[] = { original, bayer, bayer_colored,
debayer[0], debayer[1], debayer[2], debayer[3], debayer[4], debayer[5], lab1 };
while (true)
{
Image* img = images[i % count];
if (img->type == Image::Type::ppm && showEnhanced)
{
img->toLab(lab1, stream);
//black->toLab(lab2, stream);
//conv(mask, lab, zipper, stream);
//cmux(mask, mux,stream);
//blend(lab, mask, lab2, lab, stream);
dim3 blockSize = { 16, 16 };
dim3 gridSize = {
((int)original->width + blockSize.x - 1) / blockSize.x,
((int)original->height + blockSize.y - 1) / blockSize.y };
f_cielab_enhance <<< gridSize, blockSize, 0, stream >>> (
(float3*)lab1->mem.device.data, lab1->mem.device.pitch,
lab1->width, lab1->height, angle, sat, bri, ofs, da, db);
enhanced->fromLab(lab1, stream);
img = enhanced;
}
display(&disp, img, scale, dx, dy, 0, stream);
cudaStreamSynchronize(stream);
disp.cudaFinish(stream);
disp.render(stream);
rc = cudaGetLastError();
if (cudaSuccess != rc) throw "CUDA ERROR";
if (int e = disp.events())
{
if (e < 0)
{
disp.cudaUnmap(stream);
cudaStreamDestroy(stream);
return 0;
}
else switch (e)
{
case ',': i--; if (i < 0) i=count-1; break;
case '.': i++; if (i >= count) i=0; break;
case '-': scale--; if (scale <= 0) scale = 1; break;
case '=': scale++; if (scale >= 32) scale = 32; break;
case 'w': dy-=10; break;
case 's': dy+=10; break;
case 'a': dx-=10; break;
case 'd': dx+=10; break;
case '[': bri *= 1.1f; break;
case ']': bri /= 1.1f; break;
case ';': sat *= 1.1f; break;
case '\'':sat /= 1.1f; break;
case 'n': ofs += 5;break;
case 'm': ofs -= 5;break;
case 'h': da += 3;break;
case 'j': da -= 3;break;
case 'y': db += 3;break;
case 'u': db -= 5;break;
case 'c': pronk.sharpness *= 1.1; pronk.run(stream); break;
case 'v': pronk.sharpness /= 1.1; pronk.run(stream); break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9': i = e - '0'; break;
case 'q':
bri = 1, sat = 1,
da = 0, db = 0, ofs = 0;
break;
case 'r':
dx = 0, dy = 0, scale = 1;
default: break;
}
}
usleep(100000);
}
}
catch (const char* &ex)
{
fprintf(stderr, "ERROR: %s\n", ex);
fflush(stderr);
return 1;
}
return 0;
}
|
6dba8876a718d213fa9918219cad80fe3837132e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
value_type center = saturate_cast<value_type>(src(y, x));
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
int r = ksz / 2;
float r2 = (float)(r * r);
int tx = x - r + ksz;
int ty = y - r + ksz;
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(src(cy, cx));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, hipStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel<T, B<T> >, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, hipStream_t stream)
{
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, hipStream_t stream);
#ifdef OPENCV_TINY_GPU_MODULE
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
0,
0,
0,
};
#else
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
#endif
const caller_t caller = funcs[borderMode];
if (!caller)
cv::gpu::error("Unsupported input parameters for bilateral_filter", __FILE__, __LINE__, "");
caller(src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::device::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, hipStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
#ifndef OPENCV_TINY_GPU_MODULE
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
#endif
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)
#endif /* CUDA_DISABLER */
|
6dba8876a718d213fa9918219cad80fe3837132e.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
value_type center = saturate_cast<value_type>(src(y, x));
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
int r = ksz / 2;
float r2 = (float)(r * r);
int tx = x - r + ksz;
int ty = y - r + ksz;
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(src(cy, cx));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
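// Structure of the kernel above: the first branch handles pixels whose full ksz x ksz window
// lies entirely inside the image and reads src directly, while the second branch routes every
// neighbour access through the border interpolator b for windows that overlap an image edge.
// In both branches the "space2 > r2" test drops the corners of the square window, so the
// effective support is a disc of radius ksz/2 rather than the full square.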
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel<T, B<T> >, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
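// Launch-configuration sketch (illustrative numbers, not taken from the surrounding code):
// with the fixed 32x8 block above, a 1920x1080 input gives
//   grid = (divUp(1920, 32), divUp(1080, 8)) = (60, 135),
// i.e. 8100 blocks of 256 threads, one thread per output pixel. divUp rounds up, so the
// right/bottom edges are always covered and the out-of-range threads simply return at the
// bounds check at the top of the kernel.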
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, cudaStream_t stream)
{
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream);
#ifdef OPENCV_TINY_GPU_MODULE
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
0,
0,
0,
};
#else
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
#endif
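// Dispatch note: funcs[] is indexed directly with borderMode, so the value passed in is
// assumed to already be an index into this table (REFLECT101, REPLICATE, CONSTANT, REFLECT,
// WRAP in that order) rather than an arbitrary OpenCV border flag. In the tiny-GPU build only
// the first two entries are populated; the remaining modes hit the null-caller check below and
// raise the "Unsupported input parameters" error.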
const caller_t caller = funcs[borderMode];
if (!caller)
cv::gpu::error("Unsupported input parameters for bilateral_filter", __FILE__, __LINE__, "");
caller(src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::device::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, cudaStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
#ifndef OPENCV_TINY_GPU_MODULE
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
#endif
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)
#endif /* CUDA_DISABLER */
|
f4a8e332c6dc6d93d328e233474bb199a0b7b14c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
float tmp_1 = +1.5378E-11f;
float tmp_2 = +0.0f;
comp = tmp_2 - tmp_1 + +1.9266E-42f * logf(+1.1951E17f / var_2 - (+1.4474E-37f / -1.7825E34f));
for (int i=0; i < var_1; ++i) {
comp += (var_3 + sqrtf((var_4 * var_5 * var_6)));
comp += (+1.3535E-42f * var_7 - (-1.9674E-42f + +1.9089E34f * var_8 * +1.6052E35f));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
hipDeviceSynchronize();
return 0;
}
|
f4a8e332c6dc6d93d328e233474bb199a0b7b14c.cu
|
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
float tmp_1 = +1.5378E-11f;
float tmp_2 = +0.0f;
comp = tmp_2 - tmp_1 + +1.9266E-42f * logf(+1.1951E17f / var_2 - (+1.4474E-37f / -1.7825E34f));
for (int i=0; i < var_1; ++i) {
comp += (var_3 + sqrtf((var_4 * var_5 * var_6)));
comp += (+1.3535E-42f * var_7 - (-1.9674E-42f + +1.9089E34f * var_8 * +1.6052E35f));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
cudaDeviceSynchronize();
return 0;
}
|
5959f137e3c5c0b9dc9ba33b1cc53b6dba10a565.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _K1GEMM_CU_
#define _K1GEMM_CU_
namespace K1gemm {
#define BLOCK_SIZE_K1 16
const int MAX_M = 1024;
const int MAX_N = 1024;
// __constant__ float cA[MAX_M], cB[MAX_N];
__global__ void kernel_gemm(const int M, const int N, const float alpha,
const float* A, const float* B, const float beta, float *c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < M && col < N) {
float ans = A[row] * B[col];
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
}
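// What kernel_gemm above actually computes (assuming, as the A[row]/B[col] indexing suggests,
// that A is a length-M vector and B a length-N vector): a rank-1 update, i.e. an outer product
//   C[row][col] = alpha * A[row] * B[col] + beta * C[row][col]
// with one thread per output element -- a GEMM whose inner dimension K equals 1, which is
// presumably the origin of the K1gemm name.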
// gemm kernel
// 4: 1.3
// 7: 2.7
// 8: 4
// 14: 4.15
// 16: 5.08, pragma unroll(4): 5.18, manually unroll: 5.25
// 18: 4.5
// 20: 4.5
// 32: 4.85
// cublas: 10.5
// gemm interface
void caffe_gpu_gemm(const int M, const int N,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
int grid_rows = (M + BLOCK_SIZE_K1 - 1) / BLOCK_SIZE_K1;
int grid_cols = (N + BLOCK_SIZE_K1 - 1) / BLOCK_SIZE_K1;
dim3 gridSize(grid_cols, grid_rows);
dim3 blockSize(BLOCK_SIZE_K1, BLOCK_SIZE_K1);
// int A_size = M * sizeof(float);
// int B_size = N * sizeof(float);
// hipMemcpyToSymbol(cA, A, A_size);
// hipMemcpyToSymbol(cB, B, B_size);
hipLaunchKernelGGL(( kernel_gemm), dim3(gridSize), dim3(blockSize), 0, 0, M, N, alpha, A, B, beta, C);
}
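// Grid-sizing sketch for the launcher above (illustrative sizes, matching MAX_M/MAX_N):
// for M = N = 1024 and BLOCK_SIZE_K1 = 16,
//   grid_rows = grid_cols = (1024 + 16 - 1) / 16 = 64,
// so the launch is a 64x64 grid of 16x16 blocks -- 4096 blocks of 256 threads, one thread per
// element of C. The ceiling division keeps the last row/column of blocks when M or N is not a
// multiple of 16; their out-of-range threads exit via the bounds check inside the kernel.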
} // K1gemm
#endif // _K1GEMM_CU_
|
5959f137e3c5c0b9dc9ba33b1cc53b6dba10a565.cu
|
#ifndef _K1GEMM_CU_
#define _K1GEMM_CU_
namespace K1gemm {
#define BLOCK_SIZE_K1 16
const int MAX_M = 1024;
const int MAX_N = 1024;
// __constant__ float cA[MAX_M], cB[MAX_N];
__global__ void kernel_gemm(const int M, const int N, const float alpha,
const float* A, const float* B, const float beta, float *c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < M && col < N) {
float ans = A[row] * B[col];
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
}
// gemm kernel
// 4: 1.3
// 7: 2.7
// 8: 4
// 14: 4.15
// 16: 5.08, pragma unroll(4): 5.18, manually unroll: 5.25
// 18: 4.5
// 20: 4.5
// 32: 4.85
// cublas: 10.5
// gemm interface
void caffe_gpu_gemm(const int M, const int N,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
int grid_rows = (M + BLOCK_SIZE_K1 - 1) / BLOCK_SIZE_K1;
int grid_cols = (N + BLOCK_SIZE_K1 - 1) / BLOCK_SIZE_K1;
dim3 gridSize(grid_cols, grid_rows);
dim3 blockSize(BLOCK_SIZE_K1, BLOCK_SIZE_K1);
// int A_size = M * sizeof(float);
// int B_size = N * sizeof(float);
// cudaMemcpyToSymbol(cA, A, A_size);
// cudaMemcpyToSymbol(cB, B, B_size);
kernel_gemm<<<gridSize, blockSize>>>(M, N, alpha, A, B, beta, C);
}
} // K1gemm
#endif // _K1GEMM_CU_
|
93b0e4b5bbc42f143e32117f6d8590889cbe0559.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include<ops/declarable/helpers/addBias.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ static void addBiasCuda( const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCHW) {
// bias [oC]
// if(input_rank == 4)
// input and output have same shapes: [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW)
// if(input_rank == 5)
// input and output have same shapes: [bS, oD, oH, oW, oC] (NHWC) or [bS, oD, oC, oH, oW] (NCHW)
const X* x = reinterpret_cast<const X*>(vx);
const Y* y = reinterpret_cast<const Y*>(vy);
X* z = reinterpret_cast<X*>(vz);
__shared__ int rank, channelPosition, posOfNonUnityDim;
__shared__ Nd4jLong len, *sharedMem;
__shared__ bool xzSameOffsets, xzAreSame;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo); // xRank == zRank
xzSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
len = shape::length(xShapeInfo);
channelPosition = isNCHW ? 1 : rank - 1; // second or last
xzAreSame = x == z;
shape::isCommonVector(yShapeInfo, posOfNonUnityDim);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
shape::index2coords(i, xShapeInfo, coords);
const auto xOffsets = shape::getOffset(xShapeInfo, coords);
const auto zOffsets = xzSameOffsets ? xOffsets : shape::getOffset(zShapeInfo, coords);
const auto yOffsets = coords[channelPosition] * shape::stride(yShapeInfo)[posOfNonUnityDim];
if(xzAreSame)
z[zOffsets] += static_cast<X>(y[yOffsets]);
else
z[zOffsets] = x[xOffsets] + static_cast<X>(y[yOffsets]);
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void addBiasCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCHW) {
hipLaunchKernelGGL(( addBiasCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, isNCHW);
}
template<typename X, typename Y>
__global__ static void addBias2DCuda( const void* vx,
const void* vy,
void* vz,
uint32_t blocks, uint32_t length) {
auto y = reinterpret_cast<const Y*>(vy);
for (uint32_t b = blockIdx.x; b < blocks; b += gridDim.x) {
auto x = reinterpret_cast<const X*>(vx) + length * b;
auto z = reinterpret_cast<X*>(vz) + length * b;
for (uint32_t e = threadIdx.x; e < length; e += blockDim.x) {
z[e] = x[e] + y[e];
}
}
}
template<typename X, typename Y>
static void addBias2DCudaLauncher(const hipStream_t *stream, const void* vx,
const void* vy,
void* vz,
uint32_t blocks, uint32_t length) {
hipLaunchKernelGGL(( addBias2DCuda<X,Y>), dim3(256), dim3(1024), 128, *stream, vx, vy, vz, blocks, length);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void addBias(sd::graph::Context& block, const NDArray& input, const NDArray& bias, NDArray& output, const bool isNCHW) {
PointersManager manager(block.launchContext(), "addBias");
NDArray::prepareSpecialUse({&output}, {&input, &bias});
if (input.rankOf() == 2 && bias.rankOf() == 1 && input.ordering() == 'c' && output.ordering() == 'c' && input.ews() == 1 && bias.ews() == 1 && input.sizeAt(1) == bias.sizeAt(0)) {
BUILD_DOUBLE_SELECTOR(input.dataType(), bias.dataType(), addBias2DCudaLauncher,
(block.launchContext()->getCudaStream(), input.specialBuffer(), bias.specialBuffer(), output.specialBuffer(), input.sizeAt(0), bias.sizeAt(0)),
FLOAT_TYPES, FLOAT_TYPES);
} else {
// default case
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = input.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
BUILD_DOUBLE_SELECTOR(input.dataType(), bias.dataType(), addBiasCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), bias.specialBuffer(), bias.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), isNCHW),
FLOAT_TYPES, FLOAT_TYPES);
}
NDArray::registerSpecialUse({&output}, {&input, &bias});
manager.synchronize();
}
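// Shared-memory sizing note for the generic path of addBias above: each thread of addBiasCuda
// keeps its own "coords" scratch array of `rank` Nd4jLong entries in dynamic shared memory
// (coords = sharedMem + threadIdx.x * rank), which is why the launch requests
//   sharedMem = input.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128
// -- one coordinate tuple per thread plus a small pad (the extra 128 bytes are assumed to be
// plain headroom; the kernel does not address them explicitly).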
}
}
}
|
93b0e4b5bbc42f143e32117f6d8590889cbe0559.cu
|
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include<ops/declarable/helpers/addBias.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ static void addBiasCuda( const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCHW) {
// bias [oC]
// if(input_rank == 4)
// input and output have same shapes: [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW)
// if(input_rank == 5)
// input and output have same shapes: [bS, oD, oH, oW, oC] (NHWC) or [bS, oD, oC, oH, oW] (NCHW)
const X* x = reinterpret_cast<const X*>(vx);
const Y* y = reinterpret_cast<const Y*>(vy);
X* z = reinterpret_cast<X*>(vz);
__shared__ int rank, channelPosition, posOfNonUnityDim;
__shared__ Nd4jLong len, *sharedMem;
__shared__ bool xzSameOffsets, xzAreSame;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo); // xRank == zRank
xzSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
len = shape::length(xShapeInfo);
channelPosition = isNCHW ? 1 : rank - 1; // second or last
xzAreSame = x == z;
shape::isCommonVector(yShapeInfo, posOfNonUnityDim);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
shape::index2coords(i, xShapeInfo, coords);
const auto xOffsets = shape::getOffset(xShapeInfo, coords);
const auto zOffsets = xzSameOffsets ? xOffsets : shape::getOffset(zShapeInfo, coords);
const auto yOffsets = coords[channelPosition] * shape::stride(yShapeInfo)[posOfNonUnityDim];
if(xzAreSame)
z[zOffsets] += static_cast<X>(y[yOffsets]);
else
z[zOffsets] = x[xOffsets] + static_cast<X>(y[yOffsets]);
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void addBiasCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCHW) {
addBiasCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, isNCHW);
}
template<typename X, typename Y>
__global__ static void addBias2DCuda( const void* vx,
const void* vy,
void* vz,
uint32_t blocks, uint32_t length) {
auto y = reinterpret_cast<const Y*>(vy);
for (uint32_t b = blockIdx.x; b < blocks; b += gridDim.x) {
auto x = reinterpret_cast<const X*>(vx) + length * b;
auto z = reinterpret_cast<X*>(vz) + length * b;
for (uint32_t e = threadIdx.x; e < length; e += blockDim.x) {
z[e] = x[e] + y[e];
}
}
}
template<typename X, typename Y>
static void addBias2DCudaLauncher(const cudaStream_t *stream, const void* vx,
const void* vy,
void* vz,
uint32_t blocks, uint32_t length) {
addBias2DCuda<X,Y><<<256, 1024, 128, *stream>>>(vx, vy, vz, blocks, length);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void addBias(sd::graph::Context& block, const NDArray& input, const NDArray& bias, NDArray& output, const bool isNCHW) {
PointersManager manager(block.launchContext(), "addBias");
NDArray::prepareSpecialUse({&output}, {&input, &bias});
if (input.rankOf() == 2 && bias.rankOf() == 1 && input.ordering() == 'c' && output.ordering() == 'c' && input.ews() == 1 && bias.ews() == 1 && input.sizeAt(1) == bias.sizeAt(0)) {
BUILD_DOUBLE_SELECTOR(input.dataType(), bias.dataType(), addBias2DCudaLauncher,
(block.launchContext()->getCudaStream(), input.specialBuffer(), bias.specialBuffer(), output.specialBuffer(), input.sizeAt(0), bias.sizeAt(0)),
FLOAT_TYPES, FLOAT_TYPES);
} else {
// default case
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = input.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
BUILD_DOUBLE_SELECTOR(input.dataType(), bias.dataType(), addBiasCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), bias.specialBuffer(), bias.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), isNCHW),
FLOAT_TYPES, FLOAT_TYPES);
}
NDArray::registerSpecialUse({&output}, {&input, &bias});
manager.synchronize();
}
}
}
}
|
c0ad91b40797aaa30f37bea8e0aa66ab280d8c1d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//This file defines the main functions of the simulation. These functions are called in the "main" function inside the "main.cu" file.
#define _USE_MATH_DEFINES
#include "particle_positions.cuh"
#include "utilities.cuh"
#include "VTK.cuh"
#include "hashing.cuh"
#include "particle_parameters.cuh"
#include <math.h>
#include <future>
#include <chrono>
#include <math.h>
//declaration of all global variables that are going to be used in this file by all functions
char main_path[1024]; //stores the main path of the result instance
char vtk_group_path[1024]; //stores the path of the vtk group file
char vtu_fullpath[1024]; //stores the path of the current iteration file
char vtu_path[1024]; //stores the path of the vtu directory (where the vtu files are saved)
std::string pointDataNames[] = { "density" }; //stores the names of the point data to display in Paraview
std::string vectorDataNames[] = {"velocity" }; //stores the names of the vector data to display in Paraview
vec3d* d_POSITION; //stores the pointer to the position data in the GPU
vec3d* d_PRED_POSITION; //stores the pointer to the predicted position data in the GPU
vec3d* d_VELOCITY; //stores the pointer to the velocity data in the GPU
vec3d* d_PRED_VELOCITY; //stores the pointer to the predicted velocity data in the GPU
vec3d* d_ST_FORCE; //stores the pointer to the surface tension force data in the GPU
vec3d* d_VISCOSITY_FORCE; //stores the pointer to the viscosity force data in the GPU
vec3d* d_PRESSURE_FORCE; //stores the pointer to the pressure force data in the GPU
vec3d* d_NORMAL; //stores the pointer to the normal data in the GPU
float* DENSITY; //stores the pointer to the density data in the CPU
float* d_DENSITY; //stores the pointer to the density data in the GPU
float* d_PRESSURE; //stores the pointer to the pressure data in the GPU
float* d_MASS; //stores the pointer to the mass data in the GPU
int* d_TYPE; //stores the pointer to the type data in the GPU
int* d_hashtable; //stores the pointer to the hashtable data in the GPU
vec3d gravity; //stores the pointer to the gravity data in the CPU
//physical constants
float rho_0; //rest density
float visc_const; //viscosity constant
float st_const; // surface tension constant
float epsilon; // damping coefficient for collisions
//initial conditions
float PARTICLE_RADIUS; //stores the particle radius value
float MASS_calc; //stores the calculated mass value
float USER_MASS; //stores the particle mass defined by the user in the initial conditions file
float PARTICLE_DIAMETER; //stores the particle diameter value
float F_INITIAL_POSITION[3]; //fluid particles initial position
float F_FINAL_POSITION[3]; //fluid particles final position
float B_INITIAL_POSITION[3]; //boundary particles initial position
float B_FINAL_POSITION[3]; //boundary particles final position
float V_INITIAL[3]; //initial velocity defined by the user
//controlling iteration number and simulation time
int iteration = 1; //iteration counter
float simulation_time; //in seconds
float final_time; //in seconds
int N; //number of fluid particles
int B; //number of boundary particles
int T; //total number of particles
//variables for hashtable
size_t pitch; //this variable is defined by the GPU when the hipMallocPitch runs
int particles_per_row; //this is the maximum number of neighbors a particle can have due to memory allocation
int hashtable_size; //this is the size of the hashtable. Must be a power of 2.
//CUDA variables
int block_size;
int grid_size;
//PCISPH variables
float invh; //inverse of the smoothing radius
float h; //smoothing radius
float vol_comp_perc; //user defined volume compression rate <- defined in section 3.3 of [2]
float dens_fluc_perc; //user defined density fluctuation rate <- defined in section 3.3 of [2]
float* d_max_force; // GPU pointer to max_force variable
float* d_max_velocity; // GPU pointer to max_velocity variable
float* d_max_rho_err; // GPU pointer to max_rho_err variable (max density error)
float* d_sum_rho_err; // GPU pointer to sum_rho_err variable (sum of the density errors of all particles, used to compute the mean density error)
float delta_t; // time step
float max_vol_comp; // variable to stored computed value of max volume compression ( = rho_0 * vol_comp_perc / 100 )
float max_rho_fluc; // variable to stored computed value of max density fluctuation ( = rho_0 * dens_fluc_perc / 100 )
float BOUNDARY_DIAMETER; // diameter of boundary particles
float BOUNDARY_RADIUS; // radius of boundary particles
float pressure_delta; // defined in section 2.3 of [1] -> here this value is calculated without the "beta" variable, which is calculated afterwards
float max_rho_err_t_1 = 0.f; // max density error in the previous time_step
float max_rho_err = 0.f; // max density error in the current time_step (CPU memory)
bool write_pvd = true; // tells whether or not the program should write a file
char* user_results_folder = new char[256]; // user defined results folder
float save_steps; // user defined time steps to save a file
// this function reads all files in the /props folder and stores the values in the designated variables.
// If any variable is added to or removed from any of the props files, this function must be edited.
int fileReader() {
//allocating memory
char* row = new char[256]; //buffer for rows
int row_buff_index = 0; //index for row buffer
char* num_buffer = new char[256]; //buffer for numbers
int num_buffer_index = 0; //index for number buffer
float num; //stores a float variable
vec3d vec; //stores a vec3d variable
//Storing the names of varibles as they are in the files in /props folder
char* phys_props_names[] = { "rho_0","visc_const","surface_tension_const","collision_dumping_coeff" };
char* init_cond_names[] = {"particle_radius","mass","fluid_initial_coord","fluid_final_coord","boundary_initial_coord","boundary_final_coord","fluid_initial_velocity","maximum_volume_compression","maximum_density_fluctuation"};
char* system_names[] = { "initial_delta_t","initial_time","final_time","neighbors_per_particle", "save_steps","results_folder"};
int phys_props_size = sizeof(phys_props_names) / 8;
int init_cond_size = sizeof(init_cond_names) / 8;
int system_size = sizeof(system_names) / 8;
//storing the paths for each file
char* phys_props_path = "./props/physical_props.txt";
char* initial_conditions_path = "./props/initial_conditions.txt";
char* system_path = "./props/system.txt";
//Check whether the files exist -> report an error and stop execution if any is missing
if (fileExists(phys_props_path) != 0) {
std::cout << "\nERROR! Could not find physical properties file at " << phys_props_path << "\n";
return 1;
}
if (fileExists(initial_conditions_path) != 0) {
std::cout << "\nERROR! Could not find initial conditions file at " << initial_conditions_path << "\n";
return 1;
}
if (fileExists(system_path) != 0) {
std::cout << "\nERROR! Could not find system file at " << system_path << "\n";
return 1;
}
//reading physical properties
std::ifstream phys_props (phys_props_path);
for (char write2line; phys_props.get(write2line);) {
if (phys_props.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < phys_props_size; i++) {
if (strstr(row, phys_props_names[i]) != nullptr) {
break;
}
}
if (i < phys_props_size) {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
if (i == 0) {
rho_0 = num;
}
else if (i == 1) {
visc_const = num;
}
else if (i == 2) {
st_const = num;
}
else if (i == 3) {
epsilon = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
phys_props.close();
//reading initial conditions
std::ifstream init_conds(initial_conditions_path);
for (char write2line; init_conds.get(write2line);) {
if (init_conds.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < init_cond_size; i++) {
if (strstr(row, init_cond_names[i]) != nullptr) {
break;
}
}
if (i < init_cond_size) {
if (strstr(row, "[") != nullptr) {
bool save_char = false;
int axis_count = 0;
for (int j = 0; j < strlen(row); j++) {
if (axis_count > 2) {
axis_count = 0;
break;
}
if (row[j] == 91) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 44 || row[j] == 93) {
num = (float)atof(num_buffer);
if (axis_count == 0) {
vec.x = num;
} else if (axis_count == 1) {
vec.y = num;
}
else if (axis_count == 2) {
vec.z = num;
}
axis_count++;
if (row[j] == 32) {
j++;
}
num_buffer_index = 0;
num_buffer = new char[256];
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
PARTICLE_RADIUS = num;
}
else if (i == 1) {
USER_MASS = num;
}
else if (i == 2) {
F_INITIAL_POSITION[0] = vec.x;
F_INITIAL_POSITION[1] = vec.y;
F_INITIAL_POSITION[2] = vec.z;
}
else if (i == 3) {
F_FINAL_POSITION[0] = vec.x;
F_FINAL_POSITION[1] = vec.y;
F_FINAL_POSITION[2] = vec.z;
}
else if (i == 4) {
B_INITIAL_POSITION[0] = vec.x;
B_INITIAL_POSITION[1] = vec.y;
B_INITIAL_POSITION[2] = vec.z;
}
else if (i == 5) {
B_FINAL_POSITION[0] = vec.x;
B_FINAL_POSITION[1] = vec.y;
B_FINAL_POSITION[2] = vec.z;
}
else if (i == 6) {
V_INITIAL[0] = vec.x;
V_INITIAL[1] = vec.y;
V_INITIAL[2] = vec.z;
}
else if (i == 7) {
vol_comp_perc = num;
}
else if (i == 8) {
dens_fluc_perc = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
init_conds.close();
std::ifstream system_vars(system_path);
for (char write2line; system_vars.get(write2line);) {
if (system_vars.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < system_size; i++) {
if (strstr(row, system_names[i]) != nullptr) {
break;
}
}
if (i < system_size) {
bool save_char = false;
if (strstr(row, "\"") != nullptr) {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 34 && !save_char) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (row[k+1] == 32) {
j++;
}
else { break; }
}
}
else if (row[j] == 34 && save_char) {
break;
}
else if (save_char){
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
delta_t = num;
}
else if (i == 1) {
simulation_time = num;
}
else if (i == 2) {
final_time = num;
}
else if (i == 3) {
particles_per_row = (int)num;
}
else if (i == 4) {
save_steps = num;
}
else if (i == 5) {
user_results_folder = num_buffer;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
return 0;
}
// this function initializes the execution. It creates the particles, calculates some variables and allocates memory on the GPU for the main loop.
int initialize() {
//Display GPU information and check whether the program is running on a CUDA-capable machine.
hipDeviceProp_t* prop = new hipDeviceProp_t;
gpuErrchk(hipGetDeviceProperties(prop,0));
std::cout << "-----------------------------------------------\n";
std::cout << "DEVICE PROPERTIES:\n" << "Device name: " << prop->name << "\n" <<
"Max number of threads per block: " << prop->maxThreadsPerBlock << "\n" <<
"Total global memory: " << dround(prop->totalGlobalMem/1e9,2) << " gigabytes\n" <<
"Registers per block: " << prop->regsPerBlock << "\n" <<
"Shared Memory per block: " << prop->sharedMemPerBlock << " bytes\n" <<
"-----------------------------------------------\n";
block_size = prop->maxThreadsPerBlock; //stores the size of the thread blocks. Here it is set to the maximum number of threads per block of your GPU (1024 on modern devices).
max_vol_comp = rho_0 * vol_comp_perc / 100;
max_rho_fluc = rho_0 * dens_fluc_perc / 100;
//If the user did not define a mass, calculate it.
if (USER_MASS == 0) {
MASS_calc = rho_0 * (float)M_PI * pow(PARTICLE_RADIUS, 3.f) / 3.f * 4.f;
}
else {
MASS_calc = USER_MASS;
}
PARTICLE_DIAMETER = 2 * PARTICLE_RADIUS;
// get main path of simulation
getMainPath(main_path);
// write path for vtu files
strcpy(vtu_path, main_path);
strcat(vtu_path, "/vtu");
// write path for vtk group file
strcpy(vtk_group_path, main_path);
strcat(vtk_group_path, "/PCISPH.pvd");
// create directory for vtu files
CreateDir(vtu_path);
float VOLUME = 1;
const int SIMULATION_DIMENSION = 3; //3 for a 3D simulation
// Get number per dimension (NPD) of FLUID particles for hexagonal packing (assuming use of makeprism function)
int NPD[3]; //Number per dimension
for (int i = 0; i < 3; i++) {
if (i == 1) {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / (sqrt(3.f) / 2.f * PARTICLE_DIAMETER)));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
else {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / PARTICLE_DIAMETER));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
}
//Writing NPD to device
int* D_NPD; //Device pointer to NPD variable
gpuErrchk(hipMalloc((void**)&D_NPD, SIMULATION_DIMENSION * sizeof(float))); //Allocate GPU memory
gpuErrchk(hipMemcpy(D_NPD, NPD, SIMULATION_DIMENSION * sizeof(float), hipMemcpyHostToDevice)); //Write NPD to D_NPD
N = NPD[0] * NPD[1] * NPD[2]; //number of fluid particles
int SIM_SIZE = N * SIMULATION_DIMENSION; //size of the fluid part of the simulation
const int x = 40; // target number of neighbor particles within the smoothing radius
h = powf(3.f * VOLUME * x / (4.f * (float)M_PI * N), 1.f / 3.f); //smoothing length
invh = 1 / h; // inverse of smoothing length (this is calculated to make things faster in the main loop)
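// Where the smoothing-length formula above comes from: requiring that a sphere of radius h
// contain on average x = 40 particles, each occupying VOLUME / N of the fluid volume, gives
//   (4/3) * pi * h^3 = x * VOLUME / N  =>  h = (3 * VOLUME * x / (4 * pi * N))^(1/3),
// which is exactly the powf(...) expression used for h.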
vec3d f_initial; //initial position taking into account the offset of the particle radius
f_initial.x = F_INITIAL_POSITION[0] + PARTICLE_RADIUS;
f_initial.y = F_INITIAL_POSITION[1] + PARTICLE_RADIUS;
f_initial.z = F_INITIAL_POSITION[2] + PARTICLE_RADIUS;
size_t bytes_fluid_particles = SIM_SIZE * sizeof(float);
vec3d* FLUID_POSITIONS; //host pointer (CPU memory)
FLUID_POSITIONS = (vec3d*)malloc(bytes_fluid_particles); //allocating CPU memory
vec3d* D_FLUID_POSITIONS; //device pointer (GPU memory)
gpuErrchk(hipMalloc((void**)&D_FLUID_POSITIONS, bytes_fluid_particles)); //allocating GPU memory
// grid -> number of blocks
// block -> number of threads
grid_size = N / block_size + 1; //defining number of blocks
//generate locations for each particle
//check "particle_positions.cuh" file in /lib folder for more details
makePrism << <grid_size, block_size >> > (D_FLUID_POSITIONS, PARTICLE_DIAMETER, f_initial, D_NPD, N);
BOUNDARY_DIAMETER = h/2; //defining the diameter of a boundary particle as stated in section 3.2 in [2]
BOUNDARY_RADIUS = h/4;
// Get number per dimension (NPD) of BOUNDARY particles without compact packing (assuming use of makebox function)
for (int i = 0; i < 3; i++) {
NPD[i] = static_cast<int>(ceil((B_FINAL_POSITION[i] - B_INITIAL_POSITION[i]) / BOUNDARY_DIAMETER)) + 2;
}
B = NPD[0] * NPD[1] * NPD[2] - (NPD[0] - 2) * (NPD[1] - 2) * (NPD[2] - 2); //Number of boundary particles
SIM_SIZE = NPD[0] * NPD[1] * NPD[2] * SIMULATION_DIMENSION;
vec3d b_initial; //initial position taking into account the offset of the boundary particle radius
b_initial.x = B_INITIAL_POSITION[0] - BOUNDARY_RADIUS;
b_initial.y = B_INITIAL_POSITION[1] - BOUNDARY_RADIUS;
b_initial.z = B_INITIAL_POSITION[2] - BOUNDARY_RADIUS;
vec3d b_final; //final position taking into account the offset of the boundary particle radius
b_final.x = b_initial.x + BOUNDARY_DIAMETER * (NPD[0] - 1);
b_final.y = b_initial.y + BOUNDARY_DIAMETER * (NPD[1] - 1);
b_final.z = b_initial.z + BOUNDARY_DIAMETER * (NPD[2] - 1);
size_t bytes_boundary_particles = SIM_SIZE * sizeof(float); // number of bytes the boundary particles are occupying
vec3d* BOUNDARY_POSITIONS; //host pointer (CPU memory)
BOUNDARY_POSITIONS = (vec3d*)malloc(bytes_boundary_particles); //allocate memory in the host
vec3d* D_BOUNDARY_POSITIONS; //device pointer (GPU memory)
gpuErrchk(hipMalloc((void**)&D_BOUNDARY_POSITIONS, bytes_boundary_particles)); //allocate memory in the device
// this function makes an empty box with walls one particle thick
// check "particle_positions.cuh" file in /lib folder for more details
makeBox(D_BOUNDARY_POSITIONS, BOUNDARY_DIAMETER, b_initial, b_final, block_size, D_NPD,NPD, SIMULATION_DIMENSION);
T = N + B; //Total number of particles
//writing particle position memory from GPU to CPU (note the "hipMemcpyDeviceToHost" statement in the functions below)
gpuErrchk(hipMemcpy(FLUID_POSITIONS, D_FLUID_POSITIONS, bytes_fluid_particles, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(BOUNDARY_POSITIONS, D_BOUNDARY_POSITIONS, bytes_boundary_particles, hipMemcpyDeviceToHost));
// Free GPU memory for fluid particles (this memory will be reallocated with another name soon)
hipFree(D_FLUID_POSITIONS);
// HASHING ONLY FOR BOUNDARY PARTICLES
hashtable_size = powf(2, 19);
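// 2^19 = 524288 buckets. The power-of-two size is assumed to let the Hash class reduce a
// spatial hash to a bucket index with a cheap bitwise AND (index = hash & (size - 1)) instead
// of a modulo; the actual reduction lives in the Hash implementation in "hashing.cuh".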
//creating a new hashtable
Hash b_hash(hashtable_size);
int* hashtable = new int[hashtable_size * particles_per_row];
//this loop creates an empty hashtable (full of -1s)
for (int i = 0; i < hashtable_size; ++i) {
for (int j = 0; j < particles_per_row; j++) {
hashtable[i * particles_per_row + j] = -1;
}
}
//allocating 2D memory for hashtable
gpuErrchk(hipMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
//writing clean hashtable to GPU
gpuErrchk(hipMemcpy2D(d_hashtable, pitch, hashtable, particles_per_row * sizeof(int), particles_per_row * sizeof(int), hashtable_size, hipMemcpyHostToDevice));
grid_size = B / block_size + 1;
//this function makes a functional hashtable
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, D_BOUNDARY_POSITIONS, invh, b_hash, B, pitch, particles_per_row);
float* d_boundary_mass; //pointer to device memory of boundary "fake" mass ( or psi )
gpuErrchk(hipMalloc((void**)&d_boundary_mass, B * sizeof(float)));
// calculates "fake" mass (or psi) for each boundary particle as stated in [3]
// check "particle_parameters.cuh" file in /lib folder for more details
boundaryPsi << <grid_size, block_size >> > (d_boundary_mass, d_hashtable, rho_0, D_BOUNDARY_POSITIONS, h, invh, particles_per_row, pitch, b_hash, B);
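// Boundary "psi" in one line (following the comments here and, presumably, the boundary
// handling of reference [3]): each boundary particle b receives an effective mass
//   psi_b = rho_0 * V_b,   with   V_b = 1 / sum_k W(x_b - x_k, h)
// estimated from its boundary neighbours k, so densely sampled walls do not over-contribute
// to the fluid density sums.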
float* boundary_mass = (float*)malloc(B * sizeof(float)); //CPU pointer to boundary mass
//copy boundary mass from GPU to CPU
gpuErrchk(hipMemcpy(boundary_mass, d_boundary_mass, (size_t)B * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_boundary_mass));
vec3d* d_boundary_normal; //device pointer for boundary normal
gpuErrchk(hipMalloc((void**)&d_boundary_normal, B * 3 * sizeof(float)));
// calculate normal for boundary particles
// check "particle_parameters.cuh" file in /lib folder for more details
boundaryNormal << <grid_size, block_size >> > (d_boundary_normal, D_BOUNDARY_POSITIONS, b_initial, b_final, B);
vec3d* boundary_normal = (vec3d*)malloc(B * 3 * sizeof(float)); //pointer for CPU memory of boundary normal
// copying boundary normal memory from GPU to CPU
gpuErrchk(hipMemcpy(boundary_normal, d_boundary_normal, (size_t)B * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_boundary_normal)); //cleaning GPU memory of boundary normal (this will be reallocated later with another name)
// writing boundary vtu file
float** boundary_point_data[] = { &boundary_mass };
int size_pointData = sizeof(boundary_point_data) / sizeof(double);
vec3d** boundary_vectorData[] = { &boundary_normal };
int size_vectorData = sizeof(boundary_vectorData) / sizeof(double);
std::string boundary_pointDataNames[] = { "psi" };
std::string boundary_vectorDataNames[] = { "normal" };
VTU_Writer(main_path, iteration, BOUNDARY_POSITIONS, B, boundary_point_data, boundary_vectorData, boundary_pointDataNames, boundary_vectorDataNames, size_pointData, size_vectorData, vtu_fullpath, 1);
hipFree(d_hashtable); //cleaning GPU from hashtable memory
hipFree(D_BOUNDARY_POSITIONS); //cleaning GPU from boundary particle memory
// calculating pressure delta (without the beta variable) as stated in section 2.3 of [1]
int count = 0;
float min_r = std::numeric_limits<float>::infinity();
int selected_index;
int tmp_size = static_cast<int>(ceil((2 * (h + PARTICLE_DIAMETER)) / PARTICLE_DIAMETER));
vec3d* tmp_points = (vec3d*)malloc(tmp_size * tmp_size * tmp_size * 3 * sizeof(float));
// generating fake particle positions without any packing method (the same is done in [5])
for (float i = -h - PARTICLE_DIAMETER; i <= h + PARTICLE_DIAMETER; i += PARTICLE_DIAMETER) {
for (float j = -h - PARTICLE_DIAMETER; j <= h + PARTICLE_DIAMETER; j += PARTICLE_DIAMETER) {
for (float k = -h - PARTICLE_DIAMETER; k <= h + PARTICLE_DIAMETER; k += PARTICLE_DIAMETER) {
tmp_points[count].x = i;
tmp_points[count].y = j;
tmp_points[count].z = k;
count++;
float r = sqrt(i*i+j*j+k*k);
if (r < min_r) {
min_r = r;
selected_index = count;
}
}
}
}
vec3d selected_point = tmp_points[selected_index];
vec3d r_vector;
float r;
vec3d Grad_W;
Grad_W.x = 0.f;
Grad_W.y = 0.f;
Grad_W.z = 0.f;
float dot_Grad_W = 0.f;
// summation of the calculated kernel gradients
for (int i = 0; i < count; i++) {
r_vector.x = tmp_points[i].x - selected_point.x;
r_vector.y = tmp_points[i].y - selected_point.y;
r_vector.z = tmp_points[i].z - selected_point.z;
r = sqrt(r_vector.x* r_vector.x + r_vector.y* r_vector.y + r_vector.z* r_vector.z);
if (r <= h) {
vec3d inst_Grad_W = Poly6_Gradient(selected_index, i, tmp_points, r, h, invh);
Grad_W.x += inst_Grad_W.x;
Grad_W.y += inst_Grad_W.y;
Grad_W.z += inst_Grad_W.z;
dot_Grad_W += dot_product(inst_Grad_W, inst_Grad_W);
}
}
pressure_delta = -dot_product(Grad_W, Grad_W) - dot_Grad_W;
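// The value just computed is the kernel-gradient part of the PCISPH scaling factor from
// section 2.3 of [1]:
//   pressure_delta = -(sum_j grad W_ij) . (sum_j grad W_ij) - sum_j (grad W_ij . grad W_ij),
// evaluated once on the filled prototype neighbourhood built above. The missing factor
// beta = 2 * (m * delta_t / rho_0)^2 depends on the adaptive time step, so it is folded in
// later, when pressure_coeff is recomputed at the start of every mainLoop() iteration.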
//Initializing main particle variables
//Defining and allocating main position variable
vec3d* POSITION = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
POSITION[i].x = FLUID_POSITIONS[i].x;
POSITION[i].y = FLUID_POSITIONS[i].y;
POSITION[i].z = FLUID_POSITIONS[i].z;
}
for (int i = N; i < T; i++) {
POSITION[i].x = BOUNDARY_POSITIONS[i - N].x;
POSITION[i].y = BOUNDARY_POSITIONS[i - N].y;
POSITION[i].z = BOUNDARY_POSITIONS[i - N].z;
}
free(BOUNDARY_POSITIONS);
free(FLUID_POSITIONS);
gpuErrchk(hipMalloc((void**)&d_POSITION, 3*T*sizeof(float)));
gpuErrchk(hipMemcpy(d_POSITION, POSITION, 3*T*sizeof(float), hipMemcpyHostToDevice));
//Allocating memory for predicted positions and copying previous position vectors
gpuErrchk(hipMalloc((void**)&d_PRED_POSITION, 3 * T * sizeof(float)));
gpuErrchk(hipMemcpy(d_PRED_POSITION, POSITION, 3 * T * sizeof(float), hipMemcpyHostToDevice));
//Allocating memory for predicted velocity
gpuErrchk(hipMalloc((void**)&d_PRED_VELOCITY, 3 * N * sizeof(float)));
//Defining and allocating main velocity variable
vec3d* VELOCITY = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VELOCITY[i].x = V_INITIAL[0];
VELOCITY[i].y = V_INITIAL[1];
VELOCITY[i].z = V_INITIAL[2];
}
gpuErrchk(hipMalloc((void**)&d_VELOCITY, 3*N*sizeof(float)));
gpuErrchk(hipMemcpy(d_VELOCITY, VELOCITY, 3*N*sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main st force variable
vec3d* ST_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
ST_FORCE[i].x = 0.f;
ST_FORCE[i].y = 0.f;
ST_FORCE[i].z = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_ST_FORCE, 3*N*sizeof(float)));
//Defining and allocating main viscosity force variable
vec3d* VISCOSITY_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VISCOSITY_FORCE[i].x = 0.f;
VISCOSITY_FORCE[i].y = 0.f;
VISCOSITY_FORCE[i].z = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_VISCOSITY_FORCE, 3*N*sizeof(float)));
//Defining and allocating main pressure force variable
vec3d* PRESSURE_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE_FORCE[i].x = 0.f;
PRESSURE_FORCE[i].y = 0.f;
PRESSURE_FORCE[i].z = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_PRESSURE_FORCE, 3*N*sizeof(float)));
//Defining and allocating main normal variable
vec3d* NORMAL = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
NORMAL[i].x = 0.f;
NORMAL[i].y = 0.f;
NORMAL[i].z = 0.f;
}
for (int i = N; i < T; i++) {
NORMAL[i].x = boundary_normal[i - N].x;
NORMAL[i].y = boundary_normal[i - N].y;
NORMAL[i].z = boundary_normal[i - N].z;
}
free(boundary_normal);
gpuErrchk(hipMalloc((void**)&d_NORMAL, 3*T*sizeof(float)));
gpuErrchk(hipMemcpy(d_NORMAL, NORMAL, 3*T*sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main density array
DENSITY = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
DENSITY[i] = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_DENSITY, N * sizeof(float)));
gpuErrchk(hipMemcpy(d_DENSITY, DENSITY, N * sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main pressure array
float* PRESSURE = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE[i] = 0;
}
gpuErrchk(hipMalloc((void**)&d_PRESSURE, N * sizeof(float)));
//Defining and allocating main mass array
float* MASS = (float*)malloc(T * sizeof(float));
for (int i = 0; i < N; i++) {
MASS[i] = MASS_calc;
}
for (int i = N; i < T; i++) {
MASS[i] = boundary_mass[i - N];
}
free(boundary_mass);
gpuErrchk(hipMalloc((void**)&d_MASS, T * sizeof(float)));
gpuErrchk(hipMemcpy(d_MASS, MASS, T * sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main type array (0 if fluid, 1 if boundary)
int* TYPE = (int*)malloc(T * sizeof(int));
for (int i = 0; i < N; i++) {
TYPE[i] = 0;
}
for (int i = N; i < T; i++) {
TYPE[i] = 1;
}
gpuErrchk(hipMalloc((void**)&d_TYPE, T * sizeof(int)));
gpuErrchk(hipMemcpy(d_TYPE, TYPE, T * sizeof(int), hipMemcpyHostToDevice));
//Defining and allocating memory to store max density error
gpuErrchk(hipMalloc((void**)&d_max_rho_err, sizeof(float)));
//Defining and allocating memory to store max force value
gpuErrchk(hipMalloc((void**)&d_max_force, sizeof(float)));
//Defining and allocating memory to store max velocity value
gpuErrchk(hipMalloc((void**)&d_max_velocity, sizeof(float)));
//Defining and allocating memory to store summation of density errors to calculate average error
gpuErrchk(hipMalloc((void**)&d_sum_rho_err, sizeof(float)));
//defining gravity vector
gravity.x = 0.f;
gravity.y = -9.81f;
gravity.z = 0.f;
//Defining variables to write VTU files
float** pointData[] = { &DENSITY }; // here the CPU pointers to the FLOAT variables that you want to write in the VTU must be defined
vec3d** vectorData[] = { &VELOCITY }; // here the CPU pointers to the VEC3D variables that you want to write in the VTU must be defined
size_pointData = sizeof(pointData) / 8;
size_vectorData = sizeof(vectorData) / 8;
VTU_Writer(vtu_path, iteration, POSITION, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath);
VTK_Group(vtk_group_path, vtu_fullpath, simulation_time);
// Initialize main hashtable
//allocating memory for GPU hashtable
gpuErrchk(hipMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
writeTimeKeeper(main_path,max_rho_err); //time keeper file with values for time, iteration and max density error
std::cout << N << " Fluid particles\n"
<< B << " Boundary particles\n"
<< "Total of " << T << " particles.\n"
<< "Smoothing radius = " << h << " m.\n"
<< "hashtable size = " << hashtable_size << "\n";
return 0;
}
// here is where the magic happens
// comments with -> refer to the same lines of the pseudo code in Algorithm 2 in [2]
// -> while animating do
int mainLoop() {
// -> for each particle i,b do
// -> find neighbors Ni,b(t)
// here the hashtable is initialized and reset
Hash hash(hashtable_size);
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> > (d_hashtable, particles_per_row, pitch, hashtable_size);
// then a new hashtable is created
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_POSITION, invh, hash, T, pitch, particles_per_row);
// -> for each particle i do
// here there are two more steps than in the pseudo-code:
// calculate density
grid_size = N / block_size + 1;
DensityCalc << <grid_size, block_size >> > (d_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
// and the normal for each fluid particle
fluidNormal << <grid_size, block_size >> > (d_NORMAL, d_POSITION, d_MASS, d_DENSITY,d_TYPE, rho_0, h,invh, hash,d_hashtable, particles_per_row,pitch, N);
// -> compute forces Fi for viscosity and surface tension (gravity is only accounted for later)
nonPressureForces << <grid_size, block_size >> > (d_POSITION, d_VISCOSITY_FORCE, d_ST_FORCE, d_MASS, d_DENSITY, d_VELOCITY, d_NORMAL, gravity,d_TYPE, h, invh, rho_0, visc_const, st_const, particles_per_row, pitch,d_hashtable, hash, N);
// -> set pressure pi(t) = 0
resetPressure << <grid_size, block_size >> > (d_PRESSURE, N);
// here the step that sets the pressure force to 0 is skipped, as it is done in later steps
// calculate the pressure coefficient as in Equation 8 of [1]
float pressure_coeff = -1 / (2 * powf(MASS_calc * delta_t / rho_0, 2) * pressure_delta);
gpuErrchk(hipPeekAtLastError()); // this is for checking if there was any error during the kernel execution
gpuErrchk(hipDeviceSynchronize());
int _k_ = 0; // defined with underscores to prevent overwriting
// -> while k < 3 do
while (_k_ < 3) {
// -> for each particle i do
// -> predict velocity
// -> predict position
grid_size = N / block_size + 1;
positionAndVelocity << <grid_size, block_size >> > (d_PRED_POSITION,d_PRED_VELOCITY,d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
// -> predict world collision
collisionHandler << <grid_size, block_size >> > (d_PRED_POSITION, d_PRED_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
// reset and create new hashtable
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> > (d_hashtable, particles_per_row, pitch, hashtable_size);
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_PRED_POSITION, invh, hash, T, pitch, particles_per_row);
// updating distances to neighbors is unnecessary here
// -> predict density
grid_size = N / block_size + 1;
DensityCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
// -> predict density variation and -> update pressure
PressureCalc << <grid_size, block_size >> > (d_PRESSURE, d_DENSITY, rho_0, pressure_coeff, N);
// -> compute pressure force
PressureForceCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_PRESSURE_FORCE, d_PRESSURE, d_MASS, d_DENSITY,d_TYPE, h, invh, particles_per_row, pitch, d_hashtable, hash, N);
_k_++;
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
// -> compute new velocity and compute new position
positionAndVelocity << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
// -> compute new world collision
collisionHandler << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
// -> adapt time step
// criteria for changes in the delta_t value according to section 3.3 of [2]
// getting max velocity, max force, max density error and average density error
gpuErrchk(hipMemcpy(DENSITY, d_DENSITY, N * sizeof(float), hipMemcpyDeviceToHost));
max_rho_err_t_1 = max_rho_err;
float max_velocity = 0.f;
float max_force = 0.f;
float sum_rho_err = 0.f;
hipLaunchKernelGGL(( resetValues), dim3(1),dim3(1), 0, 0, d_max_velocity, d_max_force, d_sum_rho_err, d_max_rho_err);
grid_size = N / block_size + 1;
getMaxVandF << <grid_size, block_size >> > (d_max_velocity, d_max_force, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS,d_DENSITY,d_sum_rho_err,d_max_rho_err, rho_0, N);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(&max_velocity, d_max_velocity, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&max_force, d_max_force, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&sum_rho_err, d_sum_rho_err, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&max_rho_err, d_max_rho_err, sizeof(float), hipMemcpyDeviceToHost));
float avg_rho_err = sum_rho_err / N;
// criteria for delta_t increase
bool criteria1 = 0.19f * sqrt(h / max_force) > delta_t;
bool criteria2 = max_rho_err < 4.5f * max_vol_comp;
bool criteria3 = avg_rho_err < 0.9f * max_vol_comp;
bool criteria4 = 0.39f * (h/max_velocity) > delta_t;
if (criteria1 && criteria2 && criteria3 && criteria4) {
delta_t += delta_t * 0.2f / 100;
}
// criteria for delta_t decrease
criteria1 = 0.2f * sqrt(h / max_force) < delta_t;
criteria2 = max_rho_err > 5.5f * max_vol_comp;
criteria3 = avg_rho_err > max_vol_comp;
criteria4 = 0.4f * (h / max_velocity) <= delta_t;
if (criteria1 || criteria2 || criteria3 || criteria4) {
delta_t -= delta_t * 0.2f / 100;
}
// criteria for shock handling
criteria1 = max_rho_err - max_rho_err_t_1 > 8 * max_vol_comp;
criteria2 = max_rho_err > max_rho_fluc;
criteria3 = 0.45f * (h/max_velocity) < delta_t;
if (criteria1 || criteria2 || criteria3) {
//get the last saved iteration that is at least 2 iterations behind the current one
int last_iter = getLastIter(main_path);
char* iter_path = new char[100];
char* num_buffer = new char[32];
while (iteration - last_iter < 2) {
itoa(last_iter, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
remove(iter_path);
last_iter = getLastIter(main_path);
num_buffer = new char[32];
iter_path = new char[100];
}
std::cout << "\n\nSHOCK DETECTED! RETURNING " << iteration - last_iter << " ITERATIONS!\n" << std::endl;
write_pvd = false;
//SHOCK DETECTED
delta_t = delta_t / 5;
iteration = last_iter;
if (iteration <= 0) {
std::cout << "\nIMPOSSIBLE TO RETURN 2 ITERATIONS! TERMINATING SIMULATION\n" << std::endl;
return 1;
}
vec3d* position = (vec3d*)malloc(N * sizeof(vec3d));
vec3d* velocity = (vec3d*)malloc(N * sizeof(vec3d));
itoa(iteration, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
//read VTU file to go to the required step backwards
readVTU(iter_path, position, velocity);
//get the correct time of the previous iteration
getNewSimTime(main_path);
//edit PVD (group) file with the correct information
rewritePVD(main_path);
gpuErrchk(hipMemcpy(d_POSITION, position, 3 * N * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_VELOCITY, velocity, 3 * N * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
return 0;
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
if (simulation_time + delta_t > final_time) {
simulation_time = final_time;
}
else {
simulation_time += delta_t;
}
iteration++;
writeTimeKeeper(main_path,max_rho_err);
return 0;
}
// This function writes VTU files using multiple CPU cores
void multiprocessor_writer() {
char buf[1024];
itoa(iteration, buf, 10);
strcpy(vtu_fullpath, vtu_path);
strcat(vtu_fullpath, "/iter");
strcat(vtu_fullpath, buf);
strcat(vtu_fullpath, ".vtu");
std::future<void> write_vtu;
vec3d* write_position = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_velocity = (vec3d*)malloc(3 * N * sizeof(float));
float* write_density = (float*)malloc(N * sizeof(float));
gpuErrchk(hipMemcpy(write_position, d_POSITION, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_velocity, d_VELOCITY, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_density, d_DENSITY, N * sizeof(float), hipMemcpyDeviceToHost));
float** pointData[] = { &write_density };
vec3d** vectorData[] = { &write_velocity };
int size_pointData = sizeof(pointData) / 8;
int size_vectorData = sizeof(vectorData) / 8;
write_vtu = std::async(std::launch::async, VTU_Writer, vtu_path, iteration, write_position, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath,2);
if (write_pvd == true) {
strcpy(buf, vtu_fullpath);
VTK_Group(vtk_group_path, buf, simulation_time);
}
write_pvd = true;
return;
}
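// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): these functions are
// called from "main.cu", which is not included here, so the driver below is
// only an assumption based on the signatures and comments above.
//
// int main() {
// 	if (fileReader() != 0) return 1; // parse the /props configuration files
// 	if (initialize() != 0) return 1; // create particles and allocate GPU memory
// 	float next_save = save_steps; // hypothetical bookkeeping for output steps
// 	while (simulation_time < final_time) {
// 		if (mainLoop() != 0) break; // one PCISPH step (returns 1 on a fatal error)
// 		if (simulation_time >= next_save) {
// 			multiprocessor_writer(); // write the VTU file asynchronously
// 			next_save += save_steps;
// 		}
// 	}
// 	return 0;
// }
// ---------------------------------------------------------------------------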
|
c0ad91b40797aaa30f37bea8e0aa66ab280d8c1d.cu
|
//This file defines the main functions of the simulation. These functions are called in the "main" function inside the "main.cu" file.
#define _USE_MATH_DEFINES
#include "particle_positions.cuh"
#include "utilities.cuh"
#include "VTK.cuh"
#include "hashing.cuh"
#include "particle_parameters.cuh"
#include <math.h>
#include <future>
#include <chrono>
#include <math.h>
//declaration of all global variables that are going to be used in this file by all functions
char main_path[1024]; //stores the main path of the result instance
char vtk_group_path[1024]; //stores the path of the vtk group file
char vtu_fullpath[1024]; //stores the path of the current iteration file
char vtu_path[1024]; //stores the path of the vtu directory (where the vtu files are saved)
std::string pointDataNames[] = { "density" }; //stores the names of the point data to display in Paraview
std::string vectorDataNames[] = {"velocity" }; //stores the names of the vector data to display in Paraview
vec3d* d_POSITION; //stores the pointer to the position data in the GPU
vec3d* d_PRED_POSITION; //stores the pointer to the predicted position data in the GPU
vec3d* d_VELOCITY; //stores the pointer to the velocity data in the GPU
vec3d* d_PRED_VELOCITY; //stores the pointer to the predicted velocity data in the GPU
vec3d* d_ST_FORCE; //stores the pointer to the surface tension force data in the GPU
vec3d* d_VISCOSITY_FORCE; //stores the pointer to the viscosity force data in the GPU
vec3d* d_PRESSURE_FORCE; //stores the pointer to the pressure force data in the GPU
vec3d* d_NORMAL; //stores the pointer to the normal data in the GPU
float* DENSITY; //stores the pointer to the density data in the CPU
float* d_DENSITY; //stores the pointer to the density data in the GPU
float* d_PRESSURE; //stores the pointer to the pressure data in the GPU
float* d_MASS; //stores the pointer to the mass data in the GPU
int* d_TYPE; //stores the pointer to the type data in the GPU
int* d_hashtable; //stores the pointer to the hashtable data in the GPU
vec3d gravity; //stores the pointer to the gravity data in the CPU
//physical constants
float rho_0; //rest density
float visc_const; //viscosity constant
float st_const; // surface tension constant
float epsilon; // damping coefficient for collisions
//initial conditions
float PARTICLE_RADIUS; //stores the particle radius value
float MASS_calc; //stores the calculated mass value
float USER_MASS; //stores the mass defined by the user in the initial conditions file
float PARTICLE_DIAMETER; //stores the particle diameter value
float F_INITIAL_POSITION[3]; //fluid particles initial position
float F_FINAL_POSITION[3]; //fluid particles final position
float B_INITIAL_POSITION[3]; //boundary particles initial position
float B_FINAL_POSITION[3]; //boundary particles final position
float V_INITIAL[3]; //initial velocity defined by the user
//controlling iteration number and simulation time
int iteration = 1; //iteration counter
float simulation_time; //in seconds
float final_time; //in seconds
int N; //number of fluid particles
int B; //number of boundary particles
int T; //total number of particles
//variables for hashtable
size_t pitch; //this variable is defined by the GPU when the cudaMallocPitch runs
int particles_per_row; //this is the maximum number of neighbors a particle can have due to memory allocation
int hashtable_size; //this is the size of the hashtable. Must be a power of 2.
//CUDA variables
int block_size;
int grid_size;
//PCISPH variables
float invh; //inverse of the smoothing radius
float h; //smoothing radius
float vol_comp_perc; //user defined volume compression rate <- defined in section 3.3 of [2]
float dens_fluc_perc; //user defined density fluctuation rate <- defined in section 3.3 of [2]
float* d_max_force; // GPU pointer to max_force variable
float* d_max_velocity; // GPU pointer to max_velocity variable
float* d_max_rho_err; // GPU pointer to max_rho_err variable (max density error)
float* d_sum_rho_err; // GPU pointer to sum_rho_err variable (sum of all density errors across all particles, used to compute the mean density error)
float delta_t; // time step
float max_vol_comp; // variable to stored computed value of max volume compression ( = rho_0 * vol_comp_perc / 100 )
float max_rho_fluc; // variable to stored computed value of max density fluctuation ( = rho_0 * dens_fluc_perc / 100 )
float BOUNDARY_DIAMETER; // diameter of boundary particles
float BOUNDARY_RADIUS; // radius of boundary particles
float pressure_delta; // defined in section 2.3 of [1] -> here this value is calculated without the "beta" variable, which is calculated afterwards
float max_rho_err_t_1 = 0.f; // max density error in the previous time_step
float max_rho_err = 0.f; // max density error in the current time_step (CPU memory)
bool write_pvd = true; // this tells whether the program should write a file or not
char* user_results_folder = new char[256]; // user defined results folder
float save_steps; // user defined time steps to save a file
// this function reads all files in the /props folder and stores the values in the designated variables.
// If any new variable should be added or deleted in any of the props files, this function must be edited.
int fileReader() {
//allocating memory
char* row = new char[256]; //buffer for rows
int row_buff_index = 0; //index for row buffer
char* num_buffer = new char[256]; //buffer for numbers
int num_buffer_index = 0; //index for number buffer
float num; //stores a float variable
vec3d vec; //stores a vec3d variable
//Storing the names of variables as they are in the files in /props folder
char* phys_props_names[] = { "rho_0","visc_const","surface_tension_const","collision_dumping_coeff" };
char* init_cond_names[] = {"particle_radius","mass","fluid_initial_coord","fluid_final_coord","boundary_initial_coord","boundary_final_coord","fluid_initial_velocity","maximum_volume_compression","maximum_density_fluctuation"};
char* system_names[] = { "initial_delta_t","initial_time","final_time","neighbors_per_particle", "save_steps","results_folder"};
int phys_props_size = sizeof(phys_props_names) / 8;
int init_cond_size = sizeof(init_cond_names) / 8;
int system_size = sizeof(system_names) / 8;
//storing the paths for each file
char* phys_props_path = "./props/physical_props.txt";
char* initial_conditions_path = "./props/initial_conditions.txt";
char* system_path = "./props/system.txt";
//Checking whether the files exist or not -> gives an error and stops execution in case of error
if (fileExists(phys_props_path) != 0) {
std::cout << "\nERROR! Could not find physical properties file at " << phys_props_path << "\n";
return 1;
}
if (fileExists(initial_conditions_path) != 0) {
std::cout << "\nERROR! Could not find initial conditions file at " << initial_conditions_path << "\n";
return 1;
}
if (fileExists(system_path) != 0) {
std::cout << "\nERROR! Could not find system file at " << system_path << "\n";
return 1;
}
//reading physical properties
std::ifstream phys_props (phys_props_path);
for (char write2line; phys_props.get(write2line);) {
if (phys_props.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < phys_props_size; i++) {
if (strstr(row, phys_props_names[i]) != nullptr) {
break;
}
}
if (i < phys_props_size) {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
if (i == 0) {
rho_0 = num;
}
else if (i == 1) {
visc_const = num;
}
else if (i == 2) {
st_const = num;
}
else if (i == 3) {
epsilon = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
phys_props.close();
//reading initial conditions
std::ifstream init_conds(initial_conditions_path);
for (char write2line; init_conds.get(write2line);) {
if (init_conds.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < init_cond_size; i++) {
if (strstr(row, init_cond_names[i]) != nullptr) {
break;
}
}
if (i < init_cond_size) {
if (strstr(row, "[") != nullptr) {
bool save_char = false;
int axis_count = 0;
for (int j = 0; j < strlen(row); j++) {
if (axis_count > 2) {
axis_count = 0;
break;
}
if (row[j] == 91) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 44 || row[j] == 93) {
num = (float)atof(num_buffer);
if (axis_count == 0) {
vec.x = num;
} else if (axis_count == 1) {
vec.y = num;
}
else if (axis_count == 2) {
vec.z = num;
}
axis_count++;
if (row[j] == 32) {
j++;
}
num_buffer_index = 0;
num_buffer = new char[256];
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
PARTICLE_RADIUS = num;
}
else if (i == 1) {
USER_MASS = num;
}
else if (i == 2) {
F_INITIAL_POSITION[0] = vec.x;
F_INITIAL_POSITION[1] = vec.y;
F_INITIAL_POSITION[2] = vec.z;
}
else if (i == 3) {
F_FINAL_POSITION[0] = vec.x;
F_FINAL_POSITION[1] = vec.y;
F_FINAL_POSITION[2] = vec.z;
}
else if (i == 4) {
B_INITIAL_POSITION[0] = vec.x;
B_INITIAL_POSITION[1] = vec.y;
B_INITIAL_POSITION[2] = vec.z;
}
else if (i == 5) {
B_FINAL_POSITION[0] = vec.x;
B_FINAL_POSITION[1] = vec.y;
B_FINAL_POSITION[2] = vec.z;
}
else if (i == 6) {
V_INITIAL[0] = vec.x;
V_INITIAL[1] = vec.y;
V_INITIAL[2] = vec.z;
}
else if (i == 7) {
vol_comp_perc = num;
}
else if (i == 8) {
dens_fluc_perc = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
init_conds.close();
std::ifstream system_vars(system_path);
for (char write2line; system_vars.get(write2line);) {
if (system_vars.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < system_size; i++) {
if (strstr(row, system_names[i]) != nullptr) {
break;
}
}
if (i < system_size) {
bool save_char = false;
if (strstr(row, "\"") != nullptr) {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 34 && !save_char) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (row[k+1] == 32) {
j++;
}
else { break; }
}
}
else if (row[j] == 34 && save_char) {
break;
}
else if (save_char){
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
delta_t = num;
}
else if (i == 1) {
simulation_time = num;
}
else if (i == 2) {
final_time = num;
}
else if (i == 3) {
particles_per_row = (int)num;
}
else if (i == 4) {
save_steps = num;
}
else if (i == 5) {
user_results_folder = num_buffer;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
return 0;
}
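// ---------------------------------------------------------------------------
// Illustrative sketch (assumption, not shipped with the code): given the
// parsing rules above ("=" starts a value, ";" ends it, "[x, y, z]" for
// vectors and double quotes for strings), a plausible
// ./props/physical_props.txt would look like
//
//   rho_0 = 1000;
//   visc_const = 0.001;
//   surface_tension_const = 0.0728;
//   collision_dumping_coeff = 0.5;
//
// and ./props/system.txt could contain, for example,
//
//   initial_delta_t = 0.0001;
//   final_time = 2;
//   results_folder = "results";
//
// The numeric values here are placeholders, not taken from the repository.
// ---------------------------------------------------------------------------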
// this function initializes the execution. It creates the particles, calculates some variables and allocates memory in the GPU for the main loop.
int initialize() {
//Display GPU information and check whether the program is running on a CUDA-capable machine or not.
cudaDeviceProp* prop = new cudaDeviceProp;
gpuErrchk(cudaGetDeviceProperties(prop,0));
std::cout << "-----------------------------------------------\n";
std::cout << "DEVICE PROPERTIES:\n" << "Device name: " << prop->name << "\n" <<
"Max number of threads per block: " << prop->maxThreadsPerBlock << "\n" <<
"Total global memory: " << dround(prop->totalGlobalMem/1e9,2) << " gigabytes\n" <<
"Registers per block: " << prop->regsPerBlock << "\n" <<
"Shared Memory per block: " << prop->sharedMemPerBlock << " bytes\n" <<
"-----------------------------------------------\n";
block_size = prop->maxThreadsPerBlock; //stores the size of the thread blocks. Here it is set to the maximum number of threads per block of your GPU (1024 on modern devices).
max_vol_comp = rho_0 * vol_comp_perc / 100;
max_rho_fluc = rho_0 * dens_fluc_perc / 100;
//If the user did not define a mass, calculate it.
if (USER_MASS == 0) {
MASS_calc = rho_0 * (float)M_PI * pow(PARTICLE_RADIUS, 3.f) / 3.f * 4.f;
}
else {
MASS_calc = USER_MASS;
}
PARTICLE_DIAMETER = 2 * PARTICLE_RADIUS;
// get main path of simulation
getMainPath(main_path);
// write path for vtu files
strcpy(vtu_path, main_path);
strcat(vtu_path, "/vtu");
// write path for vtk group file
strcpy(vtk_group_path, main_path);
strcat(vtk_group_path, "/PCISPH.pvd");
// create directory for vtu files
CreateDir(vtu_path);
float VOLUME = 1;
const int SIMULATION_DIMENSION = 3; //3 for a 3D simulation
// Get number per dimension (NPD) of FLUID particles for hexagonal packing (assuming use of makeprism function)
int NPD[3]; //Number per dimension
for (int i = 0; i < 3; i++) {
if (i == 1) {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / (sqrt(3.f) / 2.f * PARTICLE_DIAMETER)));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
else {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / PARTICLE_DIAMETER));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
}
//Writing NPD to device
int* D_NPD; //Device pointer to NPD variable
gpuErrchk(cudaMalloc((void**)&D_NPD, SIMULATION_DIMENSION * sizeof(int))); //Allocate GPU memory (NPD is an int array)
gpuErrchk(cudaMemcpy(D_NPD, NPD, SIMULATION_DIMENSION * sizeof(int), cudaMemcpyHostToDevice)); //Write NPD to D_NPD
N = NPD[0] * NPD[1] * NPD[2]; //number of fluid particles
int SIM_SIZE = N * SIMULATION_DIMENSION; //size of the fluid part of the simulation
const int x = 40; // Number of particles inside the smoothing length
h = powf(3.f * VOLUME * x / (4.f * (float)M_PI * N), 1.f / 3.f); //smoothing length
invh = 1 / h; // inverse of smoothing length (this is calculated to make things faster in the main loop)
vec3d f_initial; //initial position taking into account the offset of the particle radius
f_initial.x = F_INITIAL_POSITION[0] + PARTICLE_RADIUS;
f_initial.y = F_INITIAL_POSITION[1] + PARTICLE_RADIUS;
f_initial.z = F_INITIAL_POSITION[2] + PARTICLE_RADIUS;
size_t bytes_fluid_particles = SIM_SIZE * sizeof(float);
vec3d* FLUID_POSITIONS; //host pointer (CPU memory)
FLUID_POSITIONS = (vec3d*)malloc(bytes_fluid_particles); //allocating CPU memory
vec3d* D_FLUID_POSITIONS; //device pointer (GPU memory)
gpuErrchk(cudaMalloc((void**)&D_FLUID_POSITIONS, bytes_fluid_particles)); //allocating GPU memory
// grid -> number of blocks
// block -> number of threads
grid_size = N / block_size + 1; //defining number of blocks
//generate locations for each particle
//check "particle_positions.cuh" file in /lib folder for more details
makePrism << <grid_size, block_size >> > (D_FLUID_POSITIONS, PARTICLE_DIAMETER, f_initial, D_NPD, N);
BOUNDARY_DIAMETER = h/2; //defining the diameter of a boundary particle as stated in section 3.2 in [2]
BOUNDARY_RADIUS = h/4;
// Get number per dimension (NPD) of BOUNDARY particles without compact packing (assuming use of makebox function)
for (int i = 0; i < 3; i++) {
NPD[i] = static_cast<int>(ceil((B_FINAL_POSITION[i] - B_INITIAL_POSITION[i]) / BOUNDARY_DIAMETER)) + 2;
}
B = NPD[0] * NPD[1] * NPD[2] - (NPD[0] - 2) * (NPD[1] - 2) * (NPD[2] - 2); //Number of boundary particles
SIM_SIZE = NPD[0] * NPD[1] * NPD[2] * SIMULATION_DIMENSION;
vec3d b_initial; //initial position taking into account the offset of the boundary particle radius
b_initial.x = B_INITIAL_POSITION[0] - BOUNDARY_RADIUS;
b_initial.y = B_INITIAL_POSITION[1] - BOUNDARY_RADIUS;
b_initial.z = B_INITIAL_POSITION[2] - BOUNDARY_RADIUS;
vec3d b_final; //final position taking into account the offset of the boundary particle radius
b_final.x = b_initial.x + BOUNDARY_DIAMETER * (NPD[0] - 1);
b_final.y = b_initial.y + BOUNDARY_DIAMETER * (NPD[1] - 1);
b_final.z = b_initial.z + BOUNDARY_DIAMETER * (NPD[2] - 1);
size_t bytes_boundary_particles = SIM_SIZE * sizeof(float); // number of bytes the boundary particles are occupying
vec3d* BOUNDARY_POSITIONS; //host pointer (CPU memory)
BOUNDARY_POSITIONS = (vec3d*)malloc(bytes_boundary_particles); //allocate memory in the host
vec3d* D_BOUNDARY_POSITIONS; //device pointer (GPU memory)
gpuErrchk(cudaMalloc((void**)&D_BOUNDARY_POSITIONS, bytes_boundary_particles)); //allocate memory in the device
// this function makes an empty box with walls with 1 particle of thickness
// check "particle_positions.cuh" file in /lib folder for more details
makeBox(D_BOUNDARY_POSITIONS, BOUNDARY_DIAMETER, b_initial, b_final, block_size, D_NPD,NPD, SIMULATION_DIMENSION);
T = N + B; //Total number of particles
//writing particle position memory from GPU to CPU (note the "cudaMemcpyDeviceToHost" statement in the functions below)
gpuErrchk(cudaMemcpy(FLUID_POSITIONS, D_FLUID_POSITIONS, bytes_fluid_particles, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(BOUNDARY_POSITIONS, D_BOUNDARY_POSITIONS, bytes_boundary_particles, cudaMemcpyDeviceToHost));
// Free GPU memory for fluid particles (this memory will be reallocated with another name soon)
cudaFree(D_FLUID_POSITIONS);
// HASHING ONLY FOR BOUNDARY PARTICLES
hashtable_size = powf(2, 19);
//creating a new hashtable
Hash b_hash(hashtable_size);
int* hashtable = new int[hashtable_size * particles_per_row];
//this loop creates an empty hashtable (full of -1s)
for (int i = 0; i < hashtable_size; ++i) {
for (int j = 0; j < particles_per_row; j++) {
hashtable[i * particles_per_row + j] = -1;
}
}
//allocating 2D memory for hashtable
gpuErrchk(cudaMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
//writing clean hashtable to GPU
gpuErrchk(cudaMemcpy2D(d_hashtable, pitch, hashtable, particles_per_row * sizeof(int), particles_per_row * sizeof(int), hashtable_size, cudaMemcpyHostToDevice));
grid_size = B / block_size + 1;
//this function makes a functional hashtable
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, D_BOUNDARY_POSITIONS, invh, b_hash, B, pitch, particles_per_row);
float* d_boundary_mass; //pointer to device memory of boundary "fake" mass ( or psi )
gpuErrchk(cudaMalloc((void**)&d_boundary_mass, B * sizeof(float)));
// calculates "fake" mass (or psi) for each boundary particle as state in [3]
// check "particle_parameters.cuh" file in /lib folder for more details
boundaryPsi << <grid_size, block_size >> > (d_boundary_mass, d_hashtable, rho_0, D_BOUNDARY_POSITIONS, h, invh, particles_per_row, pitch, b_hash, B);
float* boundary_mass = (float*)malloc(B * sizeof(float)); //CPU pointer to boundary mass
//copy boundary mass from GPU to CPU
gpuErrchk(cudaMemcpy(boundary_mass, d_boundary_mass, (size_t)B * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_boundary_mass));
vec3d* d_boundary_normal; //device pointer for boundary normal
gpuErrchk(cudaMalloc((void**)&d_boundary_normal, B * 3 * sizeof(float)));
// calculate normal for boundary particles
// check "particle_parameters.cuh" file in /lib folder for more details
boundaryNormal << <grid_size, block_size >> > (d_boundary_normal, D_BOUNDARY_POSITIONS, b_initial, b_final, B);
vec3d* boundary_normal = (vec3d*)malloc(B * 3 * sizeof(float)); //pointer for CPU memory of boundary normal
// copying boundary normal memory from GPU to CPU
gpuErrchk(cudaMemcpy(boundary_normal, d_boundary_normal, (size_t)B * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_boundary_normal)); //cleaning GPU memory of boundary normal (this will be reallocated later with another name)
// writing boundary vtu file
float** boundary_point_data[] = { &boundary_mass };
int size_pointData = sizeof(boundary_point_data) / sizeof(double);
vec3d** boundary_vectorData[] = { &boundary_normal };
int size_vectorData = sizeof(boundary_vectorData) / sizeof(double);
std::string boundary_pointDataNames[] = { "psi" };
std::string boundary_vectorDataNames[] = { "normal" };
VTU_Writer(main_path, iteration, BOUNDARY_POSITIONS, B, boundary_point_data, boundary_vectorData, boundary_pointDataNames, boundary_vectorDataNames, size_pointData, size_vectorData, vtu_fullpath, 1);
cudaFree(d_hashtable); //cleaning GPU from hashtable memory
cudaFree(D_BOUNDARY_POSITIONS); //cleaning GPU from boundary particle memory
// calculating pressure delta (without the beta variable) as stated in section 2.3 of [1]
int count = 0;
float min_r = std::numeric_limits<float>::infinity();
int selected_index;
int tmp_size = static_cast<int>(ceil((2 * (h + PARTICLE_DIAMETER)) / PARTICLE_DIAMETER));
vec3d* tmp_points = (vec3d*)malloc(tmp_size * tmp_size * tmp_size * 3 * sizeof(float));
// generating fake particle positions without any packing method (the same is done in [5])
for (float i = -h - PARTICLE_DIAMETER; i <= h + PARTICLE_DIAMETER; i += PARTICLE_DIAMETER) {
for (float j = -h - PARTICLE_DIAMETER; j <= h + PARTICLE_DIAMETER; j += PARTICLE_DIAMETER) {
for (float k = -h - PARTICLE_DIAMETER; k <= h + PARTICLE_DIAMETER; k += PARTICLE_DIAMETER) {
tmp_points[count].x = i;
tmp_points[count].y = j;
tmp_points[count].z = k;
count++;
float r = sqrt(i*i+j*j+k*k);
if (r < min_r) {
min_r = r;
selected_index = count - 1; // count was already incremented above, so the point just written is at count - 1
}
}
}
}
vec3d selected_point = tmp_points[selected_index];
vec3d r_vector;
float r;
vec3d Grad_W;
Grad_W.x = 0.f;
Grad_W.y = 0.f;
Grad_W.z = 0.f;
float dot_Grad_W = 0.f;
// summation of the calculated kernel gradients
for (int i = 0; i < count; i++) {
r_vector.x = tmp_points[i].x - selected_point.x;
r_vector.y = tmp_points[i].y - selected_point.y;
r_vector.z = tmp_points[i].z - selected_point.z;
r = sqrt(r_vector.x* r_vector.x + r_vector.y* r_vector.y + r_vector.z* r_vector.z);
if (r <= h) {
vec3d inst_Grad_W = Poly6_Gradient(selected_index, i, tmp_points, r, h, invh);
Grad_W.x += inst_Grad_W.x;
Grad_W.y += inst_Grad_W.y;
Grad_W.z += inst_Grad_W.z;
dot_Grad_W += dot_product(inst_Grad_W, inst_Grad_W);
}
}
pressure_delta = -dot_product(Grad_W, Grad_W) - dot_Grad_W;
//Initializing main particle variables
//Defining and allocating main position variable
vec3d* POSITION = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
POSITION[i].x = FLUID_POSITIONS[i].x;
POSITION[i].y = FLUID_POSITIONS[i].y;
POSITION[i].z = FLUID_POSITIONS[i].z;
}
for (int i = N; i < T; i++) {
POSITION[i].x = BOUNDARY_POSITIONS[i - N].x;
POSITION[i].y = BOUNDARY_POSITIONS[i - N].y;
POSITION[i].z = BOUNDARY_POSITIONS[i - N].z;
}
free(BOUNDARY_POSITIONS);
free(FLUID_POSITIONS);
gpuErrchk(cudaMalloc((void**)&d_POSITION, 3*T*sizeof(float)));
gpuErrchk(cudaMemcpy(d_POSITION, POSITION, 3*T*sizeof(float), cudaMemcpyHostToDevice));
//Allocating memory for predicted positions and copying previous position vectors
gpuErrchk(cudaMalloc((void**)&d_PRED_POSITION, 3 * T * sizeof(float)));
gpuErrchk(cudaMemcpy(d_PRED_POSITION, POSITION, 3 * T * sizeof(float), cudaMemcpyHostToDevice));
//Allocating memory for predicted velocity
gpuErrchk(cudaMalloc((void**)&d_PRED_VELOCITY, 3 * N * sizeof(float)));
//Defining and allocating main velocity variable
vec3d* VELOCITY = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VELOCITY[i].x = V_INITIAL[0];
VELOCITY[i].y = V_INITIAL[1];
VELOCITY[i].z = V_INITIAL[2];
}
gpuErrchk(cudaMalloc((void**)&d_VELOCITY, 3*N*sizeof(float)));
gpuErrchk(cudaMemcpy(d_VELOCITY, VELOCITY, 3*N*sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main st force variable
vec3d* ST_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
ST_FORCE[i].x = 0.f;
ST_FORCE[i].y = 0.f;
ST_FORCE[i].z = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_ST_FORCE, 3*N*sizeof(float)));
//Defining and allocating main viscosity force variable
vec3d* VISCOSITY_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VISCOSITY_FORCE[i].x = 0.f;
VISCOSITY_FORCE[i].y = 0.f;
VISCOSITY_FORCE[i].z = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_VISCOSITY_FORCE, 3*N*sizeof(float)));
//Defining and allocating main pressure force variable
vec3d* PRESSURE_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE_FORCE[i].x = 0.f;
PRESSURE_FORCE[i].y = 0.f;
PRESSURE_FORCE[i].z = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_PRESSURE_FORCE, 3*N*sizeof(float)));
//Defining and allocating main normal variable
vec3d* NORMAL = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
NORMAL[i].x = 0.f;
NORMAL[i].y = 0.f;
NORMAL[i].z = 0.f;
}
for (int i = N; i < T; i++) {
NORMAL[i].x = boundary_normal[i - N].x;
NORMAL[i].y = boundary_normal[i - N].y;
NORMAL[i].z = boundary_normal[i - N].z;
}
free(boundary_normal);
gpuErrchk(cudaMalloc((void**)&d_NORMAL, 3*T*sizeof(float)));
gpuErrchk(cudaMemcpy(d_NORMAL, NORMAL, 3*T*sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main density array
DENSITY = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
DENSITY[i] = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_DENSITY, N * sizeof(float)));
gpuErrchk(cudaMemcpy(d_DENSITY, DENSITY, N * sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main pressure array
float* PRESSURE = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE[i] = 0;
}
gpuErrchk(cudaMalloc((void**)&d_PRESSURE, N * sizeof(float)));
//Defining and allocating main mass array
float* MASS = (float*)malloc(T * sizeof(float));
for (int i = 0; i < N; i++) {
MASS[i] = MASS_calc;
}
for (int i = N; i < T; i++) {
MASS[i] = boundary_mass[i - N];
}
free(boundary_mass);
gpuErrchk(cudaMalloc((void**)&d_MASS, T * sizeof(float)));
gpuErrchk(cudaMemcpy(d_MASS, MASS, T * sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main type array (0 if fluid, 1 if boundary)
int* TYPE = (int*)malloc(T * sizeof(int));
for (int i = 0; i < N; i++) {
TYPE[i] = 0;
}
for (int i = N; i < T; i++) {
TYPE[i] = 1;
}
gpuErrchk(cudaMalloc((void**)&d_TYPE, T * sizeof(int)));
gpuErrchk(cudaMemcpy(d_TYPE, TYPE, T * sizeof(int), cudaMemcpyHostToDevice));
//Defining and allocating memory to store max density error
gpuErrchk(cudaMalloc((void**)&d_max_rho_err, sizeof(float)));
//Defining and allocating memory to store max force value
gpuErrchk(cudaMalloc((void**)&d_max_force, sizeof(float)));
//Defining and allocating memory to store max velocity value
gpuErrchk(cudaMalloc((void**)&d_max_velocity, sizeof(float)));
//Defining and allocating memory to store summation of density errors to calculate average error
gpuErrchk(cudaMalloc((void**)&d_sum_rho_err, sizeof(float)));
//defining gravity vector
gravity.x = 0.f;
gravity.y = -9.81f;
gravity.z = 0.f;
//Defining variables to write VTU files
float** pointData[] = { &DENSITY }; // here the CPU pointers to the FLOAT variables that you want to write in the VTU must be defined
vec3d** vectorData[] = { &VELOCITY }; // here the CPU pointers to the VEC3D variables that you want to write in the VTU must be defined
size_pointData = sizeof(pointData) / 8;
size_vectorData = sizeof(vectorData) / 8;
VTU_Writer(vtu_path, iteration, POSITION, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath);
VTK_Group(vtk_group_path, vtu_fullpath, simulation_time);
// Initialize main hashtable
//allocating memory for GPU hashtable
gpuErrchk(cudaMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
writeTimeKeeper(main_path,max_rho_err); //time keeper file with values for time, iteration and max density error
std::cout << N << " Fluid particles\n"
<< B << " Boundary particles\n"
<< "Total of " << T << " particles.\n"
<< "Smoothing radius = " << h << " m.\n"
<< "hashtable size = " << hashtable_size << "\n";
return 0;
}
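// ---------------------------------------------------------------------------
// Note on the spatial hash used above and in mainLoop(): the Hash class lives
// in "hashing.cuh" and is not shown in this file. The helper below is only an
// illustrative sketch of the usual cell-hash scheme (one large prime per cell
// coordinate, XORed and reduced modulo the table size); it is an assumption,
// not necessarily the exact function this project uses.
inline unsigned int example_cell_hash(vec3d p, float inv_h, unsigned int table_size) {
	// cell coordinates of the particle, with cells of edge length h
	int cx = (int)floorf(p.x * inv_h);
	int cy = (int)floorf(p.y * inv_h);
	int cz = (int)floorf(p.z * inv_h);
	// mix the three coordinates and wrap to the table (a power of 2 here)
	unsigned int hashval = ((unsigned int)cx * 73856093u) ^
	                       ((unsigned int)cy * 19349663u) ^
	                       ((unsigned int)cz * 83492791u);
	return hashval % table_size;
}
// Each bucket of d_hashtable stores up to particles_per_row particle indices;
// the neighbor search then typically scans the 27 cells surrounding a particle.
// ---------------------------------------------------------------------------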
// here is where the magic happens
// comments with -> refer to the same lines of the pseudo code in Algorithm 2 in [2]
// -> while animating do
int mainLoop() {
// -> for each particle i,b do
// -> find neighbors Ni,b(t)
// here the hashtable is initialized and reset
Hash hash(hashtable_size);
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> > (d_hashtable, particles_per_row, pitch, hashtable_size);
// then a new hashtable is created
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_POSITION, invh, hash, T, pitch, particles_per_row);
// -> for each particle i do
// here there are two more steps than in the pseudo-algorithm:
// calculate density
grid_size = N / block_size + 1;
DensityCalc << <grid_size, block_size >> > (d_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
// and the normal for each fluid particle
fluidNormal << <grid_size, block_size >> > (d_NORMAL, d_POSITION, d_MASS, d_DENSITY,d_TYPE, rho_0, h,invh, hash,d_hashtable, particles_per_row,pitch, N);
// -> compute forces Fi for viscosity and surface tension (gravity is only accounted for later)
nonPressureForces << <grid_size, block_size >> > (d_POSITION, d_VISCOSITY_FORCE, d_ST_FORCE, d_MASS, d_DENSITY, d_VELOCITY, d_NORMAL, gravity,d_TYPE, h, invh, rho_0, visc_const, st_const, particles_per_row, pitch,d_hashtable, hash, N);
// -> set pressure pi(t) = 0
resetPressure << <grid_size, block_size >> > (d_PRESSURE, N);
// here the step that sets the pressure force value to 0 is skipped, as it is done in later steps
// calculate the pressure coefficient as in Equation 8 of [1]
float pressure_coeff = -1 / (2 * powf(MASS_calc * delta_t / rho_0, 2) * pressure_delta);
gpuErrchk(cudaPeekAtLastError()); // this is for checking if there was any error during the kernel execution
gpuErrchk(cudaDeviceSynchronize());
int _k_ = 0; // defined with underscores to prevent overwriting
// -> while k < 3 do
while (_k_ < 3) {
// -> for each particle i do
// -> predict velocity
// -> predict position
grid_size = N / block_size + 1;
positionAndVelocity << <grid_size, block_size >> > (d_PRED_POSITION,d_PRED_VELOCITY,d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
// -> predict world collision
collisionHandler << <grid_size, block_size >> > (d_PRED_POSITION, d_PRED_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
// reset and create new hashtable
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> > (d_hashtable, particles_per_row, pitch, hashtable_size);
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_PRED_POSITION, invh, hash, T, pitch, particles_per_row);
// updating distances to neighbors is unnecessary here
// -> predict density
grid_size = N / block_size + 1;
DensityCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
// -> predict density variation and -> update pressure
PressureCalc << <grid_size, block_size >> > (d_PRESSURE, d_DENSITY, rho_0, pressure_coeff, N);
// -> compute pressure force
PressureForceCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_PRESSURE_FORCE, d_PRESSURE, d_MASS, d_DENSITY,d_TYPE, h, invh, particles_per_row, pitch, d_hashtable, hash, N);
_k_++;
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
// -> compute new velocity and compute new position
positionAndVelocity << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
// -> compute new world collision
collisionHandler << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
// -> adapt time step
// criteria for changes in the delta_t value according to section 3.3 of [2]
// getting max velocity, max force, max density error and average density error
gpuErrchk(cudaMemcpy(DENSITY, d_DENSITY, N * sizeof(float), cudaMemcpyDeviceToHost));
max_rho_err_t_1 = max_rho_err;
float max_velocity = 0.f;
float max_force = 0.f;
float sum_rho_err = 0.f;
resetValues<<<1,1>>>(d_max_velocity, d_max_force, d_sum_rho_err, d_max_rho_err);
grid_size = N / block_size + 1;
getMaxVandF << <grid_size, block_size >> > (d_max_velocity, d_max_force, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS,d_DENSITY,d_sum_rho_err,d_max_rho_err, rho_0, N);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(&max_velocity, d_max_velocity, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&max_force, d_max_force, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&sum_rho_err, d_sum_rho_err, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&max_rho_err, d_max_rho_err, sizeof(float), cudaMemcpyDeviceToHost));
float avg_rho_err = sum_rho_err / N;
// criteria for delta_t increase
bool criteria1 = 0.19f * sqrt(h / max_force) > delta_t;
bool criteria2 = max_rho_err < 4.5f * max_vol_comp;
bool criteria3 = avg_rho_err < 0.9f * max_vol_comp;
bool criteria4 = 0.39f * (h/max_velocity) > delta_t;
if (criteria1 && criteria2 && criteria3 && criteria4) {
delta_t += delta_t * 0.2f / 100;
}
// criteria for delta_t decrease
criteria1 = 0.2f * sqrt(h / max_force) < delta_t;
criteria2 = max_rho_err > 5.5f * max_vol_comp;
criteria3 = avg_rho_err > max_vol_comp;
criteria4 = 0.4f * (h / max_velocity) <= delta_t;
if (criteria1 || criteria2 || criteria3 || criteria4) {
delta_t -= delta_t * 0.2f / 100;
}
// criteria for shock handling
criteria1 = max_rho_err - max_rho_err_t_1 > 8 * max_vol_comp;
criteria2 = max_rho_err > max_rho_fluc;
criteria3 = 0.45f * (h/max_velocity) < delta_t;
if (criteria1 || criteria2 || criteria3) {
//get the last saved iteration that is at least 2 iterations behind the current one
int last_iter = getLastIter(main_path);
char* iter_path = new char[100];
char* num_buffer = new char[32];
while (iteration - last_iter < 2) {
itoa(last_iter, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
remove(iter_path);
last_iter = getLastIter(main_path);
num_buffer = new char[32];
iter_path = new char[100];
}
std::cout << "\n\nSHOCK DETECTED! RETURNING " << iteration - last_iter << " ITERATIONS!\n" << std::endl;
write_pvd = false;
//SHOCK DETECTED
delta_t = delta_t / 5;
iteration = last_iter;
if (iteration <= 0) {
std::cout << "\nIMPOSSIBLE TO RETURN 2 ITERATIONS! TERMINATING SIMULATION\n" << std::endl;
return 1;
}
vec3d* position = (vec3d*)malloc(N * sizeof(vec3d));
vec3d* velocity = (vec3d*)malloc(N * sizeof(vec3d));
itoa(iteration, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
//read VTU file to go to the required step backwards
readVTU(iter_path, position, velocity);
//get the correct time of the previous iteration
getNewSimTime(main_path);
//edit PVD (group) file with the correct information
rewritePVD(main_path);
gpuErrchk(cudaMemcpy(d_POSITION, position, 3 * N * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_VELOCITY, velocity, 3 * N * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
return 0;
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
if (simulation_time + delta_t > final_time) {
simulation_time = final_time;
}
else {
simulation_time += delta_t;
}
iteration++;
writeTimeKeeper(main_path,max_rho_err);
return 0;
}
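// ---------------------------------------------------------------------------
// Reference note (restating, in the notation of [1], what the code above
// computes): the PCISPH scaling factor of Equation 8 in [1] is
//
//   delta = -1 / ( beta * ( -sum_j gradW_ij . sum_j gradW_ij
//                           - sum_j ( gradW_ij . gradW_ij ) ) )
//   with  beta = 2 * (m * delta_t / rho_0)^2
//
// "pressure_delta" (computed once in initialize() over a filled neighborhood
// template) stores the term in parentheses, and "pressure_coeff" in mainLoop()
// is delta itself, recomputed every step because delta_t changes adaptively.
// Inside the prediction-correction loop the pressure is then presumably
// updated by PressureCalc as p_i += delta * (rho_i - rho_0), which is the
// standard PCISPH correction.
// ---------------------------------------------------------------------------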
// This function writes VTU files using multiple CPU cores
void multiprocessor_writer() {
char buf[1024];
itoa(iteration, buf, 10);
strcpy(vtu_fullpath, vtu_path);
strcat(vtu_fullpath, "/iter");
strcat(vtu_fullpath, buf);
strcat(vtu_fullpath, ".vtu");
std::future<void> write_vtu;
vec3d* write_position = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_velocity = (vec3d*)malloc(3 * N * sizeof(float));
float* write_density = (float*)malloc(N * sizeof(float));
gpuErrchk(cudaMemcpy(write_position, d_POSITION, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_velocity, d_VELOCITY, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_density, d_DENSITY, N * sizeof(float), cudaMemcpyDeviceToHost));
float** pointData[] = { &write_density };
vec3d** vectorData[] = { &write_velocity };
int size_pointData = sizeof(pointData) / 8;
int size_vectorData = sizeof(vectorData) / 8;
write_vtu = std::async(std::launch::async, VTU_Writer, vtu_path, iteration, write_position, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath,2);
if (write_pvd == true) {
strcpy(buf, vtu_fullpath);
VTK_Group(vtk_group_path, buf, simulation_time);
}
write_pvd = true;
return;
}
|
d1cb5db7f720eda9c294079ec7f52d4d8aaefc41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-choleskykernel.cu
// Copyright 2010-2013 Dr. Stephan Kramer
// Institut fur Numerische und Angewandte Mathematik
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include "cudamatrix/cu-choleskykernels-ansi.h"
#include <stdio.h>
#define TILE_SIZE 16
/***********************************************************************
* CUDA kernels
* some functions are templated to have the float/double operations
*/
__device__ int lex_index_2D (int r, int c, int row_length) {
return c + r*row_length;
}
__device__ int global_pos(int t_pos, int block_offset) {
return t_pos + TILE_SIZE*block_offset;
}
__device__ float inv_sqrt(float x) {
return rsqrtf(x);
}
__device__ double inv_sqrt(double x) {
return rsqrt(x);
}
template<typename T>
__global__
void __factorize_diagonal_block(T* A, int block_offset, MatrixDim d) {
int global_row_length = d.stride;
int col = threadIdx.x;
int row = threadIdx.y;
int global_row = global_pos(row,block_offset);
int global_col = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col >= d.cols))
return;
int k_max = TILE_SIZE;
if (d.cols - global_pos(0,block_offset) < TILE_SIZE)
k_max = d.cols % TILE_SIZE;
int idx = lex_index_2D(global_row, global_col, global_row_length);
__shared__ T L[TILE_SIZE][TILE_SIZE+1];
L[row][col] = 0;
L[row][col] = A[idx];
__syncthreads();
if ((row >= k_max) || (col >= k_max))
return;
T fac;
for (int k = 0; k < k_max; k++) {
__syncthreads();
fac = inv_sqrt(L[k][k]);
__syncthreads();
if ((row==k)&&(col>=k))
L[col][row] = (L[col][row])*fac;
__syncthreads();
if ((row>=col)&&(col>k))
L[row][col] = L[row][col] - L[col][k]*L[row][k];
}
__syncthreads();
if (row >= col) {
A[idx] = L[row][col];
if (A[idx] > 100000)
A[idx] = 1;
}
}
template<typename T>
__global__
void __strip_update(T* A, int block_offset, MatrixDim d) {
int global_row_length = d.stride;
int boffy = block_offset;
int boffx = blockIdx.x + boffy + 1;
int col = threadIdx.x;
int row = threadIdx.y;
__shared__ T topleft[TILE_SIZE][TILE_SIZE+1];
__shared__ T workingmat[TILE_SIZE][TILE_SIZE+1];
int global_row = global_pos(row,block_offset);
int global_col = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col >= d.cols))
return;
int idx = lex_index_2D(global_row, global_col, global_row_length);
topleft[row][col] = 0;
topleft[row][col] = A[idx];
//__syncthreads();
global_row = global_pos(row,boffx);
if (global_row >= d.cols)
return;
int idx_w = lex_index_2D(global_row, global_col, global_row_length);
//int row2 = row + block_offset * TILE_SIZE;
//int idx_w = row2 + col*global_row_length;
workingmat[col][row]=0;
workingmat[col][row]=A[idx_w];
__syncthreads();
if (row==0) {
for (int k = 0; k < TILE_SIZE; k++) {
T sum=0.0;
for (int m = 0; m < k; m++)
sum = sum + topleft[k][m]*workingmat[m][col];
workingmat[k][col] = (workingmat[k][col] - sum) / topleft[k][k];
}
}
__syncthreads();
A[idx_w] = workingmat[col][row];
if (A[idx_w] > 100000)
A[idx_w] = 1;
//A[idx_w] = 1;
}
template<typename T>
__global__
void __diag_update(T* A, int block_offset, MatrixDim d) {
int global_row_length = d.stride;
int boffx = blockIdx.x + block_offset + 1;
int col = threadIdx.x;
int row = threadIdx.y;
int global_row = global_pos(row,boffx);
int global_col = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col >= d.cols))
return;
int idx = lex_index_2D(global_row, global_col, global_row_length);
__shared__ T left[TILE_SIZE][TILE_SIZE+1];
left[row][col] = 0;
left[row][col] = A[idx];
__syncthreads();
T sum = 0.0;
if (row >= col) {
for (int kk = 0; kk < TILE_SIZE; kk++)
sum = sum + left[row][kk]*left[col][kk];
//__syncthreads();
global_col = global_pos(col, boffx);
if (global_col >= d.cols)
return;
idx = lex_index_2D(global_row, global_col, global_row_length);
A[idx] = A[idx] - sum;
}
}
template<typename T>
__global__
void __lo_update(T* A, int block_offset, int n_blocks, MatrixDim d) {
int global_row_length = d.stride;
int col = threadIdx.x;
int row = threadIdx.y;
int boffy = blockIdx.y + block_offset + 1;
//int boffx = boffy + 1;
int boffx = boffy + 1;
__shared__ T left[TILE_SIZE][TILE_SIZE];
__shared__ T upt[TILE_SIZE][TILE_SIZE + 1];
int global_row = global_pos(row,boffy);
int global_col_src = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col_src >= d.cols))
return;
int idx = lex_index_2D(global_row, global_col_src, global_row_length);
upt[row][col] = 0;
upt[row][col] = A[idx];
__syncthreads();
for (; boffx < n_blocks; boffx++) {
global_row = global_pos(row,boffx);
if (global_row >= d.cols)
return;
idx = lex_index_2D(global_row, global_col_src, global_row_length);
left[row][col] = 0;
left[row][col] = A[idx];
__syncthreads();
if (global_row >= d.cols)
return;
T matrixprod = 0.0;
for (int kk = 0; kk < TILE_SIZE; kk++)
matrixprod += left[row][kk]*upt[col][kk];
__syncthreads();
int global_col = global_pos(col,boffy);
if (global_col >= d.cols)
return;
idx = lex_index_2D(global_row, global_col, global_row_length);
A[idx] = A[idx] - matrixprod;
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* float
*/
void cudaF_factorize_diagonal_block(float* A, int block_offset, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
hipLaunchKernelGGL(( __factorize_diagonal_block), dim3(1),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
}
void cudaF_strip_update(float* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 stripgrid(n_remaining_blocks-1);
hipLaunchKernelGGL(( __strip_update), dim3(stripgrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
} else {
int stripgrid = 1;
hipLaunchKernelGGL(( __strip_update), dim3(stripgrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
}
}
void cudaF_diag_update(float* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 diaggrid(n_remaining_blocks-1);
hipLaunchKernelGGL(( __diag_update), dim3(diaggrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
} else {
int diaggrid = 1;
hipLaunchKernelGGL(( __diag_update), dim3(diaggrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
}
}
void cudaF_lo_update(float* A, int block_offset, int n_blocks, int n_remaining_blocks, MatrixDim d) {
dim3 logrid;
logrid.x = 1;
logrid.y = n_remaining_blocks-2;
dim3 threads(TILE_SIZE,TILE_SIZE);
hipLaunchKernelGGL(( __lo_update), dim3(logrid),dim3(threads), 0, 0, A,block_offset,n_blocks,d);
hipDeviceSynchronize();
}
/*
* double
*/
void cudaD_factorize_diagonal_block(double* A, int block_offset, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
hipLaunchKernelGGL(( __factorize_diagonal_block), dim3(1),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
}
void cudaD_strip_update(double* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 stripgrid(n_remaining_blocks-1);
hipLaunchKernelGGL(( __strip_update), dim3(stripgrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
} else {
int stripgrid = 1;
hipLaunchKernelGGL(( __strip_update), dim3(stripgrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
}
}
void cudaD_diag_update(double* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 diaggrid(n_remaining_blocks-1);
hipLaunchKernelGGL(( __diag_update), dim3(diaggrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
} else {
int diaggrid = 1;
hipLaunchKernelGGL(( __diag_update), dim3(diaggrid),dim3(threads), 0, 0, A,block_offset,d);
hipDeviceSynchronize();
}
}
void cudaD_lo_update(double* A, int block_offset, int n_blocks, int n_remaining_blocks, MatrixDim d) {
dim3 logrid;
logrid.x = 1;
logrid.y = n_remaining_blocks-2;
dim3 threads(TILE_SIZE,TILE_SIZE);
hipLaunchKernelGGL(( __lo_update), dim3(logrid),dim3(threads), 0, 0, A,block_offset,n_blocks,d);
hipDeviceSynchronize();
}
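// ---------------------------------------------------------------------------
// Illustrative sketch (assumption): the host-side caller of these wrappers
// lives elsewhere in the cudamatrix library and is not part of this file.
// A typical blocked Cholesky driver over a d.cols x d.cols matrix would walk
// the diagonal in TILE_SIZE blocks and invoke the four kernels in this order:
//
//   void choleskyF(float* A, MatrixDim d) {
//     int n_blocks = (d.cols + TILE_SIZE - 1) / TILE_SIZE;
//     for (int i = 0; i < n_blocks; i++) {
//       int n_remaining = n_blocks - i;
//       cudaF_factorize_diagonal_block(A, i, d);            // L_ii from A_ii
//       if (n_remaining > 1) {
//         cudaF_strip_update(A, i, n_remaining, d);         // panel below the diagonal block
//         cudaF_diag_update(A, i, n_remaining, d);          // update of later diagonal blocks
//         if (n_remaining > 2)
//           cudaF_lo_update(A, i, n_blocks, n_remaining, d); // update of the remaining lower blocks
//       }
//     }
//   }
//
// The real caller's loop bounds may differ; this only documents the intended
// ordering of the kernels defined above.
// ---------------------------------------------------------------------------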
|
d1cb5db7f720eda9c294079ec7f52d4d8aaefc41.cu
|
// cudamatrix/cu-choleskykernel.cu
// Copyright 2010-2013 Dr. Stephan Kramer
// Institut fur Numerische und Angewandte Mathematik
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include "cudamatrix/cu-choleskykernels-ansi.h"
#include <stdio.h>
#define TILE_SIZE 16
/***********************************************************************
* CUDA kernels
* some functions are templated to have the float/double operations
*/
__device__ int lex_index_2D (int r, int c, int row_length) {
return c + r*row_length;
}
__device__ int global_pos(int t_pos, int block_offset) {
return t_pos + TILE_SIZE*block_offset;
}
__device__ float inv_sqrt(float x) {
return rsqrtf(x);
}
__device__ double inv_sqrt(double x) {
return rsqrt(x);
}
template<typename T>
__global__
void __factorize_diagonal_block(T* A, int block_offset, MatrixDim d) {
int global_row_length = d.stride;
int col = threadIdx.x;
int row = threadIdx.y;
int global_row = global_pos(row,block_offset);
int global_col = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col >= d.cols))
return;
int k_max = TILE_SIZE;
if (d.cols - global_pos(0,block_offset) < TILE_SIZE)
k_max = d.cols % TILE_SIZE;
int idx = lex_index_2D(global_row, global_col, global_row_length);
__shared__ T L[TILE_SIZE][TILE_SIZE+1];
L[row][col] = 0;
L[row][col] = A[idx];
__syncthreads();
if ((row >= k_max) || (col >= k_max))
return;
T fac;
for (int k = 0; k < k_max; k++) {
__syncthreads();
fac = inv_sqrt(L[k][k]);
__syncthreads();
if ((row==k)&&(col>=k))
L[col][row] = (L[col][row])*fac;
__syncthreads();
if ((row>=col)&&(col>k))
L[row][col] = L[row][col] - L[col][k]*L[row][k];
}
__syncthreads();
if (row >= col) {
A[idx] = L[row][col];
if (A[idx] > 100000)
A[idx] = 1;
}
}
template<typename T>
__global__
void __strip_update(T* A, int block_offset, MatrixDim d) {
int global_row_length = d.stride;
int boffy = block_offset;
int boffx = blockIdx.x + boffy + 1;
int col = threadIdx.x;
int row = threadIdx.y;
__shared__ T topleft[TILE_SIZE][TILE_SIZE+1];
__shared__ T workingmat[TILE_SIZE][TILE_SIZE+1];
int global_row = global_pos(row,block_offset);
int global_col = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col >= d.cols))
return;
int idx = lex_index_2D(global_row, global_col, global_row_length);
topleft[row][col] = 0;
topleft[row][col] = A[idx];
//__syncthreads();
global_row = global_pos(row,boffx);
if (global_row >= d.cols)
return;
int idx_w = lex_index_2D(global_row, global_col, global_row_length);
//int row2 = row + block_offset * TILE_SIZE;
//int idx_w = row2 + col*global_row_length;
workingmat[col][row]=0;
workingmat[col][row]=A[idx_w];
__syncthreads();
if (row==0) {
for (int k = 0; k < TILE_SIZE; k++) {
T sum=0.0;
for (int m = 0; m < k; m++)
sum = sum + topleft[k][m]*workingmat[m][col];
workingmat[k][col] = (workingmat[k][col] - sum) / topleft[k][k];
}
}
__syncthreads();
A[idx_w] = workingmat[col][row];
if (A[idx_w] > 100000)
A[idx_w] = 1;
//A[idx_w] = 1;
}
template<typename T>
__global__
void __diag_update(T* A, int block_offset, MatrixDim d) {
int global_row_length = d.stride;
int boffx = blockIdx.x + block_offset + 1;
int col = threadIdx.x;
int row = threadIdx.y;
int global_row = global_pos(row,boffx);
int global_col = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col >= d.cols))
return;
int idx = lex_index_2D(global_row, global_col, global_row_length);
__shared__ T left[TILE_SIZE][TILE_SIZE+1];
left[row][col] = 0;
left[row][col] = A[idx];
__syncthreads();
T sum = 0.0;
if (row >= col) {
for (int kk = 0; kk < TILE_SIZE; kk++)
sum = sum + left[row][kk]*left[col][kk];
//__syncthreads();
global_col = global_pos(col, boffx);
if (global_col >= d.cols)
return;
idx = lex_index_2D(global_row, global_col, global_row_length);
A[idx] = A[idx] - sum;
}
}
template<typename T>
__global__
void __lo_update(T* A, int block_offset, int n_blocks, MatrixDim d) {
int global_row_length = d.stride;
int col = threadIdx.x;
int row = threadIdx.y;
int boffy = blockIdx.y + block_offset + 1;
//int boffx = boffy + 1;
int boffx = boffy + 1;
__shared__ T left[TILE_SIZE][TILE_SIZE];
__shared__ T upt[TILE_SIZE][TILE_SIZE + 1];
int global_row = global_pos(row,boffy);
int global_col_src = global_pos(col,block_offset);
if ((global_row >= d.cols) || (global_col_src >= d.cols))
return;
int idx = lex_index_2D(global_row, global_col_src, global_row_length);
upt[row][col] = 0;
upt[row][col] = A[idx];
__syncthreads();
for (; boffx < n_blocks; boffx++) {
global_row = global_pos(row,boffx);
if (global_row >= d.cols)
return;
idx = lex_index_2D(global_row, global_col_src, global_row_length);
left[row][col] = 0;
left[row][col] = A[idx];
__syncthreads();
if (global_row >= d.cols)
return;
T matrixprod = 0.0;
for (int kk = 0; kk < TILE_SIZE; kk++)
matrixprod += left[row][kk]*upt[col][kk];
__syncthreads();
int global_col = global_pos(col,boffy);
if (global_col >= d.cols)
return;
idx = lex_index_2D(global_row, global_col, global_row_length);
A[idx] = A[idx] - matrixprod;
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* float
*/
void cudaF_factorize_diagonal_block(float* A, int block_offset, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
__factorize_diagonal_block<<<1,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
}
void cudaF_strip_update(float* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 stripgrid(n_remaining_blocks-1);
__strip_update<<<stripgrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
} else {
int stripgrid = 1;
__strip_update<<<stripgrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
}
}
void cudaF_diag_update(float* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 diaggrid(n_remaining_blocks-1);
__diag_update<<<diaggrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
} else {
int diaggrid = 1;
__diag_update<<<diaggrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
}
}
void cudaF_lo_update(float* A, int block_offset, int n_blocks, int n_remaining_blocks, MatrixDim d) {
dim3 logrid;
logrid.x = 1;
logrid.y = n_remaining_blocks-2;
dim3 threads(TILE_SIZE,TILE_SIZE);
__lo_update<<<logrid,threads>>>(A,block_offset,n_blocks,d);
cudaThreadSynchronize();
}
/*
* double
*/
void cudaD_factorize_diagonal_block(double* A, int block_offset, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
__factorize_diagonal_block<<<1,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
}
void cudaD_strip_update(double* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 stripgrid(n_remaining_blocks-1);
__strip_update<<<stripgrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
} else {
int stripgrid = 1;
__strip_update<<<stripgrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
}
}
void cudaD_diag_update(double* A, int block_offset, int n_remaining_blocks, MatrixDim d) {
dim3 threads(TILE_SIZE,TILE_SIZE);
if (n_remaining_blocks >= 2) {
dim3 diaggrid(n_remaining_blocks-1);
__diag_update<<<diaggrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
} else {
int diaggrid = 1;
__diag_update<<<diaggrid,threads>>>(A,block_offset,d);
cudaThreadSynchronize();
}
}
void cudaD_lo_update(double* A, int block_offset, int n_blocks, int n_remaining_blocks, MatrixDim d) {
dim3 logrid;
logrid.x = 1;
logrid.y = n_remaining_blocks-2;
dim3 threads(TILE_SIZE,TILE_SIZE);
__lo_update<<<logrid,threads>>>(A,block_offset,n_blocks,d);
cudaThreadSynchronize();
}
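/*
 * Usage sketch (illustrative only, not part of the original source): one
 * plausible host-side driver that chains the wrappers above into a
 * right-looking blocked Cholesky factorization over n_blocks tiles of
 * TILE_SIZE. The function name and the n_blocks parameter are assumptions;
 * the double-precision wrappers are used analogously.
 */
void cudaF_blocked_cholesky(float* A, int n_blocks, MatrixDim d) {
  for (int i = 0; i < n_blocks; i++) {
    int n_remaining = n_blocks - i;               // tiles not yet factorized, incl. the diagonal
    cudaF_factorize_diagonal_block(A, i, d);      // Cholesky of the i-th diagonal tile
    if (n_remaining > 1) {
      cudaF_strip_update(A, i, n_remaining, d);   // triangular solve of the tile column below it
      cudaF_diag_update(A, i, n_remaining, d);    // update the remaining diagonal tiles
      if (n_remaining > 2)
        cudaF_lo_update(A, i, n_blocks, n_remaining, d);  // update the remaining off-diagonal tiles
    }
  }
}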
|
1e41e13d695585f38f8ff820320581277789be9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "hl_gpu_matrix_kernel.cuh"
#include "hl_matrix.h"
#include "hl_matrix_apply.cuh"
#include "hl_matrix_ops.cuh"
#include "hl_sequence.h"
#include "hl_sparse.ph"
#include "paddle/utils/Logging.h"
DEFINE_MATRIX_UNARY_OP(Zero, a = 0);
DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1 * a + p2 * b);
void hl_matrix_add(real* A_d,
real* B_d,
real* C_d,
int dimM,
int dimN,
real alpha,
real beta) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(B_d);
CHECK_NOTNULL(C_d);
hl_gpu_apply_ternary_op<real, ternary::_add<real>, 0, 0>(
ternary::_add<real>(alpha, beta),
A_d,
B_d,
C_d,
dimM,
dimN,
dimN,
dimN,
dimN);
CHECK_SYNC("hl_matrix_add failed");
}
#ifdef PADDLE_TYPE_DOUBLE
#define THRESHOLD 128
#else
#define THRESHOLD 64
#endif
__device__ __forceinline__ void findMax(real* I,
real* dfMax_s,
int blockSize,
int base,
int curIdx,
int nextIdx,
int dimN,
real* max) {
dfMax_s[base] = -1.0e20;
while (curIdx < dimN) {
if (dfMax_s[base] < I[nextIdx]) {
dfMax_s[base] = I[nextIdx];
}
nextIdx += blockSize;
curIdx += blockSize;
}
__syncthreads();
for (int stride = blockSize >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (base < stride) {
nextIdx = base + stride;
if (dfMax_s[base] < dfMax_s[nextIdx]) {
dfMax_s[base] = dfMax_s[nextIdx];
}
}
}
if (0 == base) {
max[0] = dfMax_s[0];
}
__syncthreads();
}
__device__ __forceinline__ void subMaxAndExp(real* I,
real* O,
int curIdx,
int nextIdx,
int blockSize,
int dimN,
real max) {
real val;
while (curIdx < dimN) {
val = I[nextIdx] - max;
if (val < -THRESHOLD) {
val = -THRESHOLD;
}
I[nextIdx] = val;
#ifndef PADDLE_TYPE_DOUBLE
O[nextIdx] = __expf(val);
#else
O[nextIdx] = exp(val);
#endif
nextIdx += blockSize;
curIdx += blockSize;
}
__syncthreads();
}
__device__ __forceinline__ void valueSum(real* O,
real* dfMax_s,
int blockSize,
int base,
int curIdx,
int nextIdx,
int dimN) {
dfMax_s[base] = 0;
while (curIdx < dimN) {
dfMax_s[base] += O[nextIdx];
nextIdx += blockSize;
curIdx += blockSize;
}
__syncthreads();
for (int stride = blockSize >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (base < stride) {
nextIdx = base + stride;
dfMax_s[base] += dfMax_s[nextIdx];
}
}
__syncthreads();
}
__device__ __forceinline__ void divSum(
real* O, real sum, int curIdx, int nextIdx, int blockSize, int dimN) {
while (curIdx < dimN) {
O[nextIdx] /= sum;
nextIdx += blockSize;
curIdx += blockSize;
}
}
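// Numerically stable softmax of one row of length dimN, computed by the four
// helpers above: O[j] = exp(I[j] - max_k I[k]) / sum_k exp(I[k] - max_k I[k]),
// with the shifted exponent clamped at -THRESHOLD before exponentiation.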
__device__ __forceinline__ void softmax(real* I,
real* O,
real* dfMax_s,
int blockSize,
int base,
int curIdx,
int nextIdx,
int dimN) {
__shared__ real max;
// find the max number
findMax(I, dfMax_s, blockSize, base, curIdx, nextIdx, dimN, &max);
// subtract the max value and apply exp
subMaxAndExp(I, O, base, nextIdx, blockSize, dimN, max);
// add dimN values into blockDim.x buffer
// sum is in dfMax_s[0]
valueSum(O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN);
// divide by the sum
divSum(O, dfMax_s[0], curIdx, nextIdx, blockSize, dimN);
}
template <int blockSize>
__global__ void KeMatrixSoftMax(real* O, real* I, int dimN) {
int base = threadIdx.x;
__shared__ real dfMax_s[blockSize];
int nextIdx = blockIdx.x * dimN + base;
int curIdx = base;
softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN);
}
void hl_matrix_softmax(real* A_d, real* C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
dim3 block(512, 1);
dim3 grid(dimM, 1);
hipLaunchKernelGGL(( KeMatrixSoftMax<512>), dim3(grid), dim3(block), 0, STREAM_DEFAULT, C_d, A_d, dimN);
CHECK_SYNC("hl_matrix_softmax failed");
}
template <int blockSize>
__global__ void KeSequenceSoftMax(real* O, real* I, const int* index) {
int base = threadIdx.x;
int bid = blockIdx.x;
__shared__ real dfMax_s[blockSize];
int start = index[bid];
int dimN = index[bid + 1] - start;
int nextIdx = start + base;
int curIdx = base;
softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN);
}
void hl_sequence_softmax_forward(real* A_d,
real* C_d,
const int* index,
int numSequence) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
dim3 block(512, 1);
dim3 grid(numSequence, 1);
hipLaunchKernelGGL(( KeSequenceSoftMax<512>), dim3(grid), dim3(block), 0, STREAM_DEFAULT, C_d, A_d, index);
CHECK_SYNC("hl_sequence_softmax_forward failed");
}
__global__ void KeMatrixDerivative(
real* grad_d, real* output_d, real* sftmaxSum_d, int dimM, int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int colIdx = blockIdx.y * blockDim.y + threadIdx.y;
int index;
if (rowIdx < dimM && colIdx < dimN) {
index = rowIdx * dimN + colIdx;
grad_d[index] = output_d[index] * (grad_d[index] - sftmaxSum_d[rowIdx]);
}
}
void hl_matrix_softmax_derivative(
real* grad_d, real* output_d, real* sftmaxSum_d, int dimM, int dimN) {
CHECK_NOTNULL(grad_d);
CHECK_NOTNULL(output_d);
CHECK_NOTNULL(sftmaxSum_d);
int blocksX = (dimM + 0) / 1;
int blocksY = (dimN + 1024 - 1) / 1024;
dim3 threads(1, 1024);
dim3 grid(blocksX, blocksY);
hipLaunchKernelGGL(( KeMatrixDerivative), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
grad_d, output_d, sftmaxSum_d, dimM, dimN);
CHECK_SYNC("hl_matrix_softmax_derivative failed");
}
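// For each row, with S the set of label columns given by the CSR matrix, the
// two loops below accumulate the multi-label binary cross entropy
//   entropy = -sum_{j in S} log(o_j) - sum_{j not in S} log(1 - o_j),
// written as -sum_j log(1 - o_j) - sum_{j in S} log(o_j / (1 - o_j)).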
__global__ void KeMatrixMultiBinaryCrossEntropy(
real* output, real* entropy, int* row, int* col, int dimM, int dimN) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimM) {
for (int i = 0; i < dimN; i++) {
entropy[index] -= log(1 - output[index * dimN + i]);
}
int* row_col = col + row[index];
int col_num = row[index + 1] - row[index];
for (int i = 0; i < col_num; i++) {
real o = output[index * dimN + row_col[i]];
entropy[index] -= log(o / (1 - o));
}
}
}
void hl_matrix_multi_binary_cross_entropy(real* output,
real* entropy,
hl_sparse_matrix_s csr_mat,
int dimM,
int dimN) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(entropy);
CHECK_NOTNULL(csr_mat);
CHECK_EQ(csr_mat->format, HL_SPARSE_CSR);
int n_threads = 1024;
int blocks = (dimM + n_threads - 1) / n_threads;
dim3 threads(n_threads);
dim3 grid(blocks);
hl_csr_matrix mat = (hl_csr_matrix)(csr_mat->matrix);
hipLaunchKernelGGL(( KeMatrixMultiBinaryCrossEntropy), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
output, entropy, mat->csr_row, mat->csr_col, dimM, dimN);
CHECK_SYNC("hl_matrix_multi_binary_cross_entropy failed");
}
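// Backward pass of the kernel above; the accumulated gradient per column is
//   d entropy / d o_j = -1 / o_j         for label columns (1/(1-o) - 1/(o*(1-o)))
//   d entropy / d o_j =  1 / (1 - o_j)   otherwise.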
__global__ void KeMatrixMultiBinaryCrossEntropyBp(
real* output, real* grad, int* row, int* col, int dimM, int dimN) {
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (row_idx < dimM) {
for (int i = 0; i < dimN; i++) {
int index = row_idx * dimN + i;
grad[index] += 1.0 / (1 - output[index]);
}
int col_num = row[row_idx + 1] - row[row_idx];
int* row_col = col + row[row_idx];
for (int i = 0; i < col_num; i++) {
int index = row_idx * dimN + row_col[i];
grad[index] -= 1.0 / (output[index] * (1 - output[index]));
}
}
}
void hl_matrix_multi_binary_cross_entropy_bp(
real* output, real* grad, hl_sparse_matrix_s csr_mat, int dimM, int dimN) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(grad);
CHECK_NOTNULL(csr_mat);
CHECK_EQ(csr_mat->format, HL_SPARSE_CSR);
int n_threads = 1024;
int blocks = (dimM + n_threads - 1) / n_threads;
dim3 threads(n_threads);
dim3 grid(blocks);
hl_csr_matrix mat = (hl_csr_matrix)(csr_mat->matrix);
hipLaunchKernelGGL(( KeMatrixMultiBinaryCrossEntropyBp), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
output, grad, mat->csr_row, mat->csr_col, dimM, dimN);
CHECK_SYNC("hl_matrix_multi_binary_cross_entropy_bp failed");
}
__global__ void KeMatrixCrossEntropy(
real* O, real* E, int* label, int dimM, int dimN) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int newBase;
if (index < dimM) {
newBase = label[index];
newBase = newBase % dimN;
E[index] = -log(O[index * dimN + newBase]);
}
}
void hl_matrix_cross_entropy(
real* A_d, real* C_d, int* label_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
int blocks = (dimM + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KeMatrixCrossEntropy), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
A_d, C_d, label_d, dimM, dimN);
CHECK_SYNC("hl_matrix_cross_entropy failed");
}
__global__ void KeMatrixCrossEntropyBp(
real* grad_d, real* output_d, int* label_d, int dimM, int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int colIdx = blockIdx.y * blockDim.y + threadIdx.y;
int index;
if (rowIdx < dimM && colIdx < dimN) {
index = rowIdx * dimN + colIdx;
if (label_d[rowIdx] == colIdx) {
grad_d[index] -= 1.0f / output_d[index];
}
}
}
void hl_matrix_cross_entropy_bp(
real* grad_d, real* output_d, int* label_d, int dimM, int dimN) {
CHECK_NOTNULL(grad_d);
CHECK_NOTNULL(output_d);
CHECK_NOTNULL(label_d);
int blocksX = (dimM + 0) / 1;
int blocksY = (dimN + 1024 - 1) / 1024;
dim3 threads(1, 1024);
dim3 grid(blocksX, blocksY);
hipLaunchKernelGGL(( KeMatrixCrossEntropyBp), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
grad_d, output_d, label_d, dimM, dimN);
CHECK_SYNC("hl_matrix_cross_entropy_bp failed");
}
void hl_matrix_zero_mem(real* data, int num) {
hl_gpu_apply_unary_op(unary::Zero<real>(), data, 1, num, num);
}
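// Parametric ReLU: f(x) = x for x > 0 and f(x) = w * x otherwise, where one
// learned slope w is shared by each group of partial_sum consecutive columns.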
__global__ void KeParamReluForward(real* output,
real* input,
real* w,
int width,
int height,
int partial_sum) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height) {
int index = ty * width + tx;
output[index] =
input[index] > 0 ? input[index] : input[index] * w[tx / partial_sum];
}
}
void hl_param_relu_forward(real* output,
real* input,
real* w,
int width,
int height,
int partial_sum) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(input);
CHECK_NOTNULL(w);
dim3 threads(16, 16);
int blockX = (width + 16 - 1) / 16;
int blockY = (height + 16 - 1) / 16;
dim3 grid(blockX, blockY);
hipLaunchKernelGGL(( KeParamReluForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
output, input, w, width, height, partial_sum);
CHECK_SYNC("hl_param_relu_forward failed");
}
template <int blockSize>
__global__ void KeParamReluBackWardW(real* grad_w,
real* grad_o,
real* input,
int width,
int height,
int partial_sum) {
const int tid = threadIdx.x;
__shared__ real temp[blockSize];
grad_o += partial_sum * blockIdx.x;
input += partial_sum * blockIdx.x;
real tmp = 0.0;
for (int index = tid; index < partial_sum * height; index += blockSize) {
int row = index / partial_sum;
int offset = row * width + (index - row * partial_sum);
if (input[offset] < 0) {
tmp += grad_o[offset] * input[offset];
}
}
temp[tid] = tmp;
__syncthreads();
for (int s = blockSize / 2; s > 0; s >>= 1) {
if (tid < s) {
temp[tid] += temp[tid + s];
}
__syncthreads();
}
if (tid == 0) {
grad_w[blockIdx.x] += temp[0];
}
}
void hl_param_relu_backward_w(real* grad_w,
real* grad_o,
real* input,
int width,
int height,
int partial_sum) {
CHECK_NOTNULL(grad_w);
CHECK_NOTNULL(grad_o);
CHECK_NOTNULL(input);
const int blockSize = 1024;
int grid_num = width / partial_sum;
dim3 threads(blockSize, 1);
dim3 grid(grid_num, 1);
hipLaunchKernelGGL(( KeParamReluBackWardW<blockSize>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
grad_w, grad_o, input, width, height, partial_sum);
CHECK_SYNC("hl_param_relu_backward_w failed");
}
__global__ void KeParamReluBackwardDiff(real* grad_o,
real* input,
real* w,
real* diff,
int width,
int height,
int partial_sum) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height) {
int index = ty * width + tx;
diff[index] += grad_o[index] * (input[index] > 0 ? 1 : w[tx / partial_sum]);
}
}
void hl_param_relu_backward_diff(real* grad_o,
real* data,
real* w,
real* diff,
int width,
int height,
int partial_sum) {
CHECK_NOTNULL(grad_o);
CHECK_NOTNULL(data);
CHECK_NOTNULL(w);
CHECK_NOTNULL(diff);
dim3 threads(16, 16);
int blockX = (width + 16 - 1) / 16;
int blockY = (height + 16 - 1) / 16;
dim3 grid(blockX, blockY);
hipLaunchKernelGGL(( KeParamReluBackwardDiff), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
grad_o, data, w, diff, width, height, partial_sum);
CHECK_SYNC("hl_param_relu_backward_diff failed");
}
__global__ void KeMatrixAddSharedBias(
real* A, real* B, const int channel, const int M, const int N, real scale) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int dim = N / channel;
if (index < M * N) {
int i = index % N;
i = i / dim;
A[index] += scale * B[i];
}
}
void hl_matrix_add_shared_bias(real* A_d,
real* B_d,
const int channel,
const int dimM,
const int dimN,
real scale) {
const int blocks = 512;
const int grids = DIVUP(dimM * dimN, blocks);
hipLaunchKernelGGL(( KeMatrixAddSharedBias), dim3(grids), dim3(blocks), 0, STREAM_DEFAULT,
A_d, B_d, channel, dimM, dimN, scale);
CHECK_SYNC("hl_matrix_add_shared_bias failed");
}
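// Two reduction strategies: when dim (= N / channel) is below limit, one thread
// per channel sums its dim * M elements directly; otherwise one block per
// channel accumulates blockSize-element chunks in shared memory via simpleReduce.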
template <int blockSize>
__global__ void KeMatrixCollectSharedBias(real* B,
real* A,
const int channel,
const int M,
const int N,
const int dim,
const int limit,
real scale) {
if (dim < limit) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < channel) {
real sum = 0.0;
for (int i = 0; i < M; ++i) {
for (int j = 0; j < dim; ++j) {
sum += A[i * N + index * dim + j];
}
}
B[index] += scale * sum;
}
} else {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
__shared__ real smem[blockSize];
real sum = 0.0;
for (int j = 0; j < ((dim * M + blockSize - 1) / blockSize); ++j) {
int n = j * blockSize + tid;
int m = n / dim;
int w = n % dim;
smem[tid] = (m < M && w < dim) ? A[m * N + bid * dim + w] : 0.0;
__syncthreads();
simpleReduce(smem, tid, blockSize);
sum += smem[0];
}
if (tid == 0) {
B[bid] += scale * sum;
}
}
}
void hl_matrix_collect_shared_bias(real* B_d,
real* A_d,
const int channel,
const int dimM,
const int dimN,
real scale) {
const int dim = dimN / channel;
const int blocks = 256;
const int limit = 64;
int grids = (dimM * dim) < limit ? DIVUP(channel, blocks) : channel;
hipLaunchKernelGGL(( KeMatrixCollectSharedBias<blocks>), dim3(grids), dim3(blocks), 0, STREAM_DEFAULT,
B_d, A_d, channel, dimM, dimN, dim, limit, scale);
CHECK_SYNC("hl_matrix_collect_shared_bias failed");
}
__global__ void keMatrixRotate(
real* mat, real* matRot, int dimM, int dimN, bool clockWise) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < dimM * dimN) {
int i = idx / dimN;
int j = idx % dimN;
if (clockWise) {
matRot[j * dimM + i] = mat[(dimM - i - 1) * dimN + j];
} else {
matRot[j * dimM + i] = mat[i * dimN + (dimN - j - 1)];
}
}
}
void hl_matrix_rotate(
real* mat, real* matRot, int dimM, int dimN, bool clockWise) {
CHECK_NOTNULL(mat);
CHECK_NOTNULL(matRot);
const int threads = 512;
const int blocks = DIVUP(dimM * dimN, threads);
hipLaunchKernelGGL(( keMatrixRotate), dim3(blocks), dim3(threads), 0, STREAM_DEFAULT,
mat, matRot, dimM, dimN, clockWise);
CHECK_SYNC("hl_matrix_rotate failed");
}
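// vol2col: lowers 3-D convolution to a matrix product by copying, for every
// output position (d_out, h_out, w_out) of every input channel, the
// filterD x filterH x filterW patch it covers into one column of dataDst,
// writing zeros for elements that fall inside the padding region.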
__global__ void keMatrixVol2Col(int num_kernels,
const real* dataSrc,
real* dataDst,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
int depth_col,
int height_col,
int width_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int d_out = (index / width_col / height_col) % depth_col;
int channel_in = index / width_col / height_col / depth_col;
int channel_out = channel_in * filterD * filterH * filterW;
int w_in = w_out * strideW - paddingW;
int h_in = h_out * strideH - paddingH;
int d_in = d_out * strideD - paddingD;
dataDst +=
((channel_out * depth_col + d_out) * height_col + h_out) * width_col +
w_out;
dataSrc += ((channel_in * depth + d_in) * height + h_in) * width + w_in;
for (int k = 0; k < filterD; ++k) {
for (int i = 0; i < filterH; ++i) {
for (int j = 0; j < filterW; ++j) {
int d = d_in + k;
int h = h_in + i;
int w = w_in + j;
*dataDst = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 &&
w < width)
? dataSrc[(k * height + i) * width + j]
: 0;
dataDst += depth_col * height_col * width_col;
}
}
}
}
}
void hl_matrix_vol2Col(const real* dataSrc,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
real* dataDst) {
int depth_col = (depth + 2 * paddingD - filterD) / strideD + 1;
int height_col = (height + 2 * paddingH - filterH) / strideH + 1;
int width_col = (width + 2 * paddingW - filterW) / strideW + 1;
int num_kernels = channels * depth_col * height_col * width_col;
const int threads = 512;
const int blocks = DIVUP(num_kernels, threads);
hipLaunchKernelGGL(( keMatrixVol2Col), dim3(blocks), dim3(threads), 0, STREAM_DEFAULT, num_kernels,
dataSrc,
dataDst,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col);
CHECK_SYNC("hl_matrix_vol2Col failed");
}
__global__ void keMatrixCol2Vol(int num_kernels,
real* dataDst,
const real* dataSrc,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
int depth_col,
int height_col,
int width_col,
real alpha,
real beta) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
real srcVal = 0;
real dstVal = dataDst[index];
int w = index % width + paddingW;
int h = (index / width) % height + paddingH;
int d = (index / width / height) % depth + paddingD;
int c = index / width / height / depth;
// compute the start and end of the output
int w_col_start = (w < filterW) ? 0 : (w - filterW) / strideW + 1;
int w_col_end = min(w / strideW + 1, width_col);
int h_col_start = (h < filterH) ? 0 : (h - filterH) / strideH + 1;
int h_col_end = min(h / strideH + 1, height_col);
int d_col_start = (d < filterD) ? 0 : (d - filterD) / strideD + 1;
int d_col_end = min(d / strideD + 1, depth_col);
int offset = (c * filterD * filterW * filterH + d * filterW * filterH +
h * filterW + w) *
depth_col * height_col * width_col;
int coeff_d_col =
(1 - strideD * filterW * filterH * depth_col) * height_col * width_col;
int coeff_h_col =
(1 - strideH * filterW * depth_col * height_col) * width_col;
int coeff_w_col = (1 - strideW * depth_col * height_col * width_col);
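// The coeff_* terms convert steps in the output-column indices into steps in
// the vol2col buffer: advancing d_col by one moves one column forward
// (+height_col*width_col) while stepping strideD entries back along the filter
// depth axis (-strideD*filterH*filterW*depth_col*height_col*width_col), and
// likewise for h_col and w_col.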
for (int d_col = d_col_start; d_col < d_col_end; ++d_col) {
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
srcVal += dataSrc[offset + d_col * coeff_d_col + h_col * coeff_h_col +
w_col * coeff_w_col];
}
}
}
dataDst[index] = alpha * srcVal + beta * dstVal;
}
}
void hl_matrix_col2Vol(real* dataDst,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
const real* dataSrc,
real alpha,
real beta) {
int depth_col = (depth + 2 * paddingD - filterD) / strideD + 1;
int height_col = (height + 2 * paddingH - filterH) / strideH + 1;
int width_col = (width + 2 * paddingW - filterW) / strideW + 1;
int num_kernels = channels * depth * height * width;
const int threads = 512;
const int blocks = DIVUP(num_kernels, threads);
hipLaunchKernelGGL(( keMatrixCol2Vol), dim3(blocks), dim3(threads), 0, STREAM_DEFAULT, num_kernels,
dataDst,
dataSrc,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col,
alpha,
beta);
CHECK_SYNC("hl_matrix_col2Vol failed");
}
__global__ void keVectorCast2Int(int* out, real* vec, int size) {
for (int i = threadIdx.x; i < (size); i += blockDim.x) {
out[i] = int(vec[i]);
}
}
void hl_vector_cast2int(int* out, real* vec, int size) {
hipLaunchKernelGGL(( keVectorCast2Int), dim3(1), dim3(512), 0, STREAM_DEFAULT, out, vec, size);
CHECK_SYNC("hl_vector_cast2int failed");
}
|
1e41e13d695585f38f8ff820320581277789be9c.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "hl_gpu_matrix_kernel.cuh"
#include "hl_matrix.h"
#include "hl_matrix_apply.cuh"
#include "hl_matrix_ops.cuh"
#include "hl_sequence.h"
#include "hl_sparse.ph"
#include "paddle/utils/Logging.h"
DEFINE_MATRIX_UNARY_OP(Zero, a = 0);
DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1 * a + p2 * b);
void hl_matrix_add(real* A_d,
real* B_d,
real* C_d,
int dimM,
int dimN,
real alpha,
real beta) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(B_d);
CHECK_NOTNULL(C_d);
hl_gpu_apply_ternary_op<real, ternary::_add<real>, 0, 0>(
ternary::_add<real>(alpha, beta),
A_d,
B_d,
C_d,
dimM,
dimN,
dimN,
dimN,
dimN);
CHECK_SYNC("hl_matrix_add failed");
}
#ifdef PADDLE_TYPE_DOUBLE
#define THRESHOLD 128
#else
#define THRESHOLD 64
#endif
__device__ __forceinline__ void findMax(real* I,
real* dfMax_s,
int blockSize,
int base,
int curIdx,
int nextIdx,
int dimN,
real* max) {
dfMax_s[base] = -1.0e20;
while (curIdx < dimN) {
if (dfMax_s[base] < I[nextIdx]) {
dfMax_s[base] = I[nextIdx];
}
nextIdx += blockSize;
curIdx += blockSize;
}
__syncthreads();
for (int stride = blockSize >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (base < stride) {
nextIdx = base + stride;
if (dfMax_s[base] < dfMax_s[nextIdx]) {
dfMax_s[base] = dfMax_s[nextIdx];
}
}
}
if (0 == base) {
max[0] = dfMax_s[0];
}
__syncthreads();
}
__device__ __forceinline__ void subMaxAndExp(real* I,
real* O,
int curIdx,
int nextIdx,
int blockSize,
int dimN,
real max) {
real val;
while (curIdx < dimN) {
val = I[nextIdx] - max;
if (val < -THRESHOLD) {
val = -THRESHOLD;
}
I[nextIdx] = val;
#ifndef PADDLE_TYPE_DOUBLE
O[nextIdx] = __expf(val);
#else
O[nextIdx] = exp(val);
#endif
nextIdx += blockSize;
curIdx += blockSize;
}
__syncthreads();
}
__device__ __forceinline__ void valueSum(real* O,
real* dfMax_s,
int blockSize,
int base,
int curIdx,
int nextIdx,
int dimN) {
dfMax_s[base] = 0;
while (curIdx < dimN) {
dfMax_s[base] += O[nextIdx];
nextIdx += blockSize;
curIdx += blockSize;
}
__syncthreads();
for (int stride = blockSize >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (base < stride) {
nextIdx = base + stride;
dfMax_s[base] += dfMax_s[nextIdx];
}
}
__syncthreads();
}
__device__ __forceinline__ void divSum(
real* O, real sum, int curIdx, int nextIdx, int blockSize, int dimN) {
while (curIdx < dimN) {
O[nextIdx] /= sum;
nextIdx += blockSize;
curIdx += blockSize;
}
}
__device__ __forceinline__ void softmax(real* I,
real* O,
real* dfMax_s,
int blockSize,
int base,
int curIdx,
int nextIdx,
int dimN) {
__shared__ real max;
// find the max number
findMax(I, dfMax_s, blockSize, base, curIdx, nextIdx, dimN, &max);
// subtract the max value and apply exp
subMaxAndExp(I, O, base, nextIdx, blockSize, dimN, max);
// add dimN values into blockDim.x buffer
// sum is in dfMax_s[0]
valueSum(O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN);
// divide by the sum
divSum(O, dfMax_s[0], curIdx, nextIdx, blockSize, dimN);
}
template <int blockSize>
__global__ void KeMatrixSoftMax(real* O, real* I, int dimN) {
int base = threadIdx.x;
__shared__ real dfMax_s[blockSize];
int nextIdx = blockIdx.x * dimN + base;
int curIdx = base;
softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN);
}
void hl_matrix_softmax(real* A_d, real* C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
dim3 block(512, 1);
dim3 grid(dimM, 1);
KeMatrixSoftMax<512><<<grid, block, 0, STREAM_DEFAULT>>>(C_d, A_d, dimN);
CHECK_SYNC("hl_matrix_softmax failed");
}
template <int blockSize>
__global__ void KeSequenceSoftMax(real* O, real* I, const int* index) {
int base = threadIdx.x;
int bid = blockIdx.x;
__shared__ real dfMax_s[blockSize];
int start = index[bid];
int dimN = index[bid + 1] - start;
int nextIdx = start + base;
int curIdx = base;
softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN);
}
void hl_sequence_softmax_forward(real* A_d,
real* C_d,
const int* index,
int numSequence) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
dim3 block(512, 1);
dim3 grid(numSequence, 1);
KeSequenceSoftMax<512><<<grid, block, 0, STREAM_DEFAULT>>>(C_d, A_d, index);
CHECK_SYNC("hl_sequence_softmax_forward failed");
}
__global__ void KeMatrixDerivative(
real* grad_d, real* output_d, real* sftmaxSum_d, int dimM, int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int colIdx = blockIdx.y * blockDim.y + threadIdx.y;
int index;
if (rowIdx < dimM && colIdx < dimN) {
index = rowIdx * dimN + colIdx;
grad_d[index] = output_d[index] * (grad_d[index] - sftmaxSum_d[rowIdx]);
}
}
void hl_matrix_softmax_derivative(
real* grad_d, real* output_d, real* sftmaxSum_d, int dimM, int dimN) {
CHECK_NOTNULL(grad_d);
CHECK_NOTNULL(output_d);
CHECK_NOTNULL(sftmaxSum_d);
int blocksX = (dimM + 0) / 1;
int blocksY = (dimN + 1024 - 1) / 1024;
dim3 threads(1, 1024);
dim3 grid(blocksX, blocksY);
KeMatrixDerivative<<<grid, threads, 0, STREAM_DEFAULT>>>(
grad_d, output_d, sftmaxSum_d, dimM, dimN);
CHECK_SYNC("hl_matrix_softmax_derivative failed");
}
__global__ void KeMatrixMultiBinaryCrossEntropy(
real* output, real* entropy, int* row, int* col, int dimM, int dimN) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimM) {
for (int i = 0; i < dimN; i++) {
entropy[index] -= log(1 - output[index * dimN + i]);
}
int* row_col = col + row[index];
int col_num = row[index + 1] - row[index];
for (int i = 0; i < col_num; i++) {
real o = output[index * dimN + row_col[i]];
entropy[index] -= log(o / (1 - o));
}
}
}
void hl_matrix_multi_binary_cross_entropy(real* output,
real* entropy,
hl_sparse_matrix_s csr_mat,
int dimM,
int dimN) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(entropy);
CHECK_NOTNULL(csr_mat);
CHECK_EQ(csr_mat->format, HL_SPARSE_CSR);
int n_threads = 1024;
int blocks = (dimM + n_threads - 1) / n_threads;
dim3 threads(n_threads);
dim3 grid(blocks);
hl_csr_matrix mat = (hl_csr_matrix)(csr_mat->matrix);
KeMatrixMultiBinaryCrossEntropy<<<grid, threads, 0, STREAM_DEFAULT>>>(
output, entropy, mat->csr_row, mat->csr_col, dimM, dimN);
CHECK_SYNC("hl_matrix_multi_binary_cross_entropy failed");
}
__global__ void KeMatrixMultiBinaryCrossEntropyBp(
real* output, real* grad, int* row, int* col, int dimM, int dimN) {
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (row_idx < dimM) {
for (int i = 0; i < dimN; i++) {
int index = row_idx * dimN + i;
grad[index] += 1.0 / (1 - output[index]);
}
int col_num = row[row_idx + 1] - row[row_idx];
int* row_col = col + row[row_idx];
for (int i = 0; i < col_num; i++) {
int index = row_idx * dimN + row_col[i];
grad[index] -= 1.0 / (output[index] * (1 - output[index]));
}
}
}
void hl_matrix_multi_binary_cross_entropy_bp(
real* output, real* grad, hl_sparse_matrix_s csr_mat, int dimM, int dimN) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(grad);
CHECK_NOTNULL(csr_mat);
CHECK_EQ(csr_mat->format, HL_SPARSE_CSR);
int n_threads = 1024;
int blocks = (dimM + n_threads - 1) / n_threads;
dim3 threads(n_threads);
dim3 grid(blocks);
hl_csr_matrix mat = (hl_csr_matrix)(csr_mat->matrix);
KeMatrixMultiBinaryCrossEntropyBp<<<grid, threads, 0, STREAM_DEFAULT>>>(
output, grad, mat->csr_row, mat->csr_col, dimM, dimN);
CHECK_SYNC("hl_matrix_multi_binary_cross_entropy_bp failed");
}
__global__ void KeMatrixCrossEntropy(
real* O, real* E, int* label, int dimM, int dimN) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int newBase;
if (index < dimM) {
newBase = label[index];
newBase = newBase % dimN;
E[index] = -log(O[index * dimN + newBase]);
}
}
void hl_matrix_cross_entropy(
real* A_d, real* C_d, int* label_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
int blocks = (dimM + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KeMatrixCrossEntropy<<<grid, threads, 0, STREAM_DEFAULT>>>(
A_d, C_d, label_d, dimM, dimN);
CHECK_SYNC("hl_matrix_cross_entropy failed");
}
__global__ void KeMatrixCrossEntropyBp(
real* grad_d, real* output_d, int* label_d, int dimM, int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int colIdx = blockIdx.y * blockDim.y + threadIdx.y;
int index;
if (rowIdx < dimM && colIdx < dimN) {
index = rowIdx * dimN + colIdx;
if (label_d[rowIdx] == colIdx) {
grad_d[index] -= 1.0f / output_d[index];
}
}
}
void hl_matrix_cross_entropy_bp(
real* grad_d, real* output_d, int* label_d, int dimM, int dimN) {
CHECK_NOTNULL(grad_d);
CHECK_NOTNULL(output_d);
CHECK_NOTNULL(label_d);
int blocksX = (dimM + 0) / 1;
int blocksY = (dimN + 1024 - 1) / 1024;
dim3 threads(1, 1024);
dim3 grid(blocksX, blocksY);
KeMatrixCrossEntropyBp<<<grid, threads, 0, STREAM_DEFAULT>>>(
grad_d, output_d, label_d, dimM, dimN);
CHECK_SYNC("hl_matrix_cross_entropy_bp failed");
}
void hl_matrix_zero_mem(real* data, int num) {
hl_gpu_apply_unary_op(unary::Zero<real>(), data, 1, num, num);
}
__global__ void KeParamReluForward(real* output,
real* input,
real* w,
int width,
int height,
int partial_sum) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height) {
int index = ty * width + tx;
output[index] =
input[index] > 0 ? input[index] : input[index] * w[tx / partial_sum];
}
}
void hl_param_relu_forward(real* output,
real* input,
real* w,
int width,
int height,
int partial_sum) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(input);
CHECK_NOTNULL(w);
dim3 threads(16, 16);
int blockX = (width + 16 - 1) / 16;
int blockY = (height + 16 - 1) / 16;
dim3 grid(blockX, blockY);
KeParamReluForward<<<grid, threads, 0, STREAM_DEFAULT>>>(
output, input, w, width, height, partial_sum);
CHECK_SYNC("hl_param_relu_forward failed");
}
template <int blockSize>
__global__ void KeParamReluBackWardW(real* grad_w,
real* grad_o,
real* input,
int width,
int height,
int partial_sum) {
const int tid = threadIdx.x;
__shared__ real temp[blockSize];
grad_o += partial_sum * blockIdx.x;
input += partial_sum * blockIdx.x;
real tmp = 0.0;
for (int index = tid; index < partial_sum * height; index += blockSize) {
int row = index / partial_sum;
int offset = row * width + (index - row * partial_sum);
if (input[offset] < 0) {
tmp += grad_o[offset] * input[offset];
}
}
temp[tid] = tmp;
__syncthreads();
for (int s = blockSize / 2; s > 0; s >>= 1) {
if (tid < s) {
temp[tid] += temp[tid + s];
}
__syncthreads();
}
if (tid == 0) {
grad_w[blockIdx.x] += temp[0];
}
}
void hl_param_relu_backward_w(real* grad_w,
real* grad_o,
real* input,
int width,
int height,
int partial_sum) {
CHECK_NOTNULL(grad_w);
CHECK_NOTNULL(grad_o);
CHECK_NOTNULL(input);
const int blockSize = 1024;
int grid_num = width / partial_sum;
dim3 threads(blockSize, 1);
dim3 grid(grid_num, 1);
KeParamReluBackWardW<blockSize><<<grid, threads, 0, STREAM_DEFAULT>>>(
grad_w, grad_o, input, width, height, partial_sum);
CHECK_SYNC("hl_param_relu_backward_w failed");
}
__global__ void KeParamReluBackwardDiff(real* grad_o,
real* input,
real* w,
real* diff,
int width,
int height,
int partial_sum) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height) {
int index = ty * width + tx;
diff[index] += grad_o[index] * (input[index] > 0 ? 1 : w[tx / partial_sum]);
}
}
void hl_param_relu_backward_diff(real* grad_o,
real* data,
real* w,
real* diff,
int width,
int height,
int partial_sum) {
CHECK_NOTNULL(grad_o);
CHECK_NOTNULL(data);
CHECK_NOTNULL(w);
CHECK_NOTNULL(diff);
dim3 threads(16, 16);
int blockX = (width + 16 - 1) / 16;
int blockY = (height + 16 - 1) / 16;
dim3 grid(blockX, blockY);
KeParamReluBackwardDiff<<<grid, threads, 0, STREAM_DEFAULT>>>(
grad_o, data, w, diff, width, height, partial_sum);
CHECK_SYNC("hl_param_relu_backward_diff failed");
}
__global__ void KeMatrixAddSharedBias(
real* A, real* B, const int channel, const int M, const int N, real scale) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int dim = N / channel;
if (index < M * N) {
int i = index % N;
i = i / dim;
A[index] += scale * B[i];
}
}
void hl_matrix_add_shared_bias(real* A_d,
real* B_d,
const int channel,
const int dimM,
const int dimN,
real scale) {
const int blocks = 512;
const int grids = DIVUP(dimM * dimN, blocks);
KeMatrixAddSharedBias<<<grids, blocks, 0, STREAM_DEFAULT>>>(
A_d, B_d, channel, dimM, dimN, scale);
CHECK_SYNC("hl_matrix_add_shared_bias failed");
}
template <int blockSize>
__global__ void KeMatrixCollectSharedBias(real* B,
real* A,
const int channel,
const int M,
const int N,
const int dim,
const int limit,
real scale) {
if (dim < limit) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < channel) {
real sum = 0.0;
for (int i = 0; i < M; ++i) {
for (int j = 0; j < dim; ++j) {
sum += A[i * N + index * dim + j];
}
}
B[index] += scale * sum;
}
} else {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
__shared__ real smem[blockSize];
real sum = 0.0;
for (int j = 0; j < ((dim * M + blockSize - 1) / blockSize); ++j) {
int n = j * blockSize + tid;
int m = n / dim;
int w = n % dim;
smem[tid] = (m < M && w < dim) ? A[m * N + bid * dim + w] : 0.0;
__syncthreads();
simpleReduce(smem, tid, blockSize);
sum += smem[0];
}
if (tid == 0) {
B[bid] += scale * sum;
}
}
}
void hl_matrix_collect_shared_bias(real* B_d,
real* A_d,
const int channel,
const int dimM,
const int dimN,
real scale) {
const int dim = dimN / channel;
const int blocks = 256;
const int limit = 64;
int grids = (dimM * dim) < limit ? DIVUP(channel, blocks) : channel;
KeMatrixCollectSharedBias<blocks><<<grids, blocks, 0, STREAM_DEFAULT>>>(
B_d, A_d, channel, dimM, dimN, dim, limit, scale);
CHECK_SYNC("hl_matrix_collect_shared_bias failed");
}
__global__ void keMatrixRotate(
real* mat, real* matRot, int dimM, int dimN, bool clockWise) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < dimM * dimN) {
int i = idx / dimN;
int j = idx % dimN;
if (clockWise) {
matRot[j * dimM + i] = mat[(dimM - i - 1) * dimN + j];
} else {
matRot[j * dimM + i] = mat[i * dimN + (dimN - j - 1)];
}
}
}
void hl_matrix_rotate(
real* mat, real* matRot, int dimM, int dimN, bool clockWise) {
CHECK_NOTNULL(mat);
CHECK_NOTNULL(matRot);
const int threads = 512;
const int blocks = DIVUP(dimM * dimN, threads);
keMatrixRotate<<<blocks, threads, 0, STREAM_DEFAULT>>>(
mat, matRot, dimM, dimN, clockWise);
CHECK_SYNC("hl_matrix_rotate failed");
}
__global__ void keMatrixVol2Col(int num_kernels,
const real* dataSrc,
real* dataDst,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
int depth_col,
int height_col,
int width_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int d_out = (index / width_col / height_col) % depth_col;
int channel_in = index / width_col / height_col / depth_col;
int channel_out = channel_in * filterD * filterH * filterW;
int w_in = w_out * strideW - paddingW;
int h_in = h_out * strideH - paddingH;
int d_in = d_out * strideD - paddingD;
dataDst +=
((channel_out * depth_col + d_out) * height_col + h_out) * width_col +
w_out;
dataSrc += ((channel_in * depth + d_in) * height + h_in) * width + w_in;
for (int k = 0; k < filterD; ++k) {
for (int i = 0; i < filterH; ++i) {
for (int j = 0; j < filterW; ++j) {
int d = d_in + k;
int h = h_in + i;
int w = w_in + j;
*dataDst = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 &&
w < width)
? dataSrc[(k * height + i) * width + j]
: 0;
dataDst += depth_col * height_col * width_col;
}
}
}
}
}
void hl_matrix_vol2Col(const real* dataSrc,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
real* dataDst) {
int depth_col = (depth + 2 * paddingD - filterD) / strideD + 1;
int height_col = (height + 2 * paddingH - filterH) / strideH + 1;
int width_col = (width + 2 * paddingW - filterW) / strideW + 1;
int num_kernels = channels * depth_col * height_col * width_col;
const int threads = 512;
const int blocks = DIVUP(num_kernels, threads);
keMatrixVol2Col<<<blocks, threads, 0, STREAM_DEFAULT>>>(num_kernels,
dataSrc,
dataDst,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col);
CHECK_SYNC("hl_matrix_vol2Col failed");
}
__global__ void keMatrixCol2Vol(int num_kernels,
real* dataDst,
const real* dataSrc,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
int depth_col,
int height_col,
int width_col,
real alpha,
real beta) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
real srcVal = 0;
real dstVal = dataDst[index];
int w = index % width + paddingW;
int h = (index / width) % height + paddingH;
int d = (index / width / height) % depth + paddingD;
int c = index / width / height / depth;
// compute the start and end of the output
int w_col_start = (w < filterW) ? 0 : (w - filterW) / strideW + 1;
int w_col_end = min(w / strideW + 1, width_col);
int h_col_start = (h < filterH) ? 0 : (h - filterH) / strideH + 1;
int h_col_end = min(h / strideH + 1, height_col);
int d_col_start = (d < filterD) ? 0 : (d - filterD) / strideD + 1;
int d_col_end = min(d / strideD + 1, depth_col);
int offset = (c * filterD * filterW * filterH + d * filterW * filterH +
h * filterW + w) *
depth_col * height_col * width_col;
int coeff_d_col =
(1 - strideD * filterW * filterH * depth_col) * height_col * width_col;
int coeff_h_col =
(1 - strideH * filterW * depth_col * height_col) * width_col;
int coeff_w_col = (1 - strideW * depth_col * height_col * width_col);
for (int d_col = d_col_start; d_col < d_col_end; ++d_col) {
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
srcVal += dataSrc[offset + d_col * coeff_d_col + h_col * coeff_h_col +
w_col * coeff_w_col];
}
}
}
dataDst[index] = alpha * srcVal + beta * dstVal;
}
}
void hl_matrix_col2Vol(real* dataDst,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
const real* dataSrc,
real alpha,
real beta) {
int depth_col = (depth + 2 * paddingD - filterD) / strideD + 1;
int height_col = (height + 2 * paddingH - filterH) / strideH + 1;
int width_col = (width + 2 * paddingW - filterW) / strideW + 1;
int num_kernels = channels * depth * height * width;
const int threads = 512;
const int blocks = DIVUP(num_kernels, threads);
keMatrixCol2Vol<<<blocks, threads, 0, STREAM_DEFAULT>>>(num_kernels,
dataDst,
dataSrc,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col,
alpha,
beta);
CHECK_SYNC("hl_matrix_col2Vol failed");
}
__global__ void keVectorCast2Int(int* out, real* vec, int size) {
for (int i = threadIdx.x; i < (size); i += blockDim.x) {
out[i] = int(vec[i]);
}
}
void hl_vector_cast2int(int* out, real* vec, int size) {
keVectorCast2Int<<<1, 512, 0, STREAM_DEFAULT>>>(out, vec, size);
CHECK_SYNC("hl_vector_cast2int failed");
}
|
1a8199dec490ae235b23c848827890d087701260.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* TorsionSolveOnDevice.cu
*
* Created on 11/7/2017
* Author: SRB
*/
#include "NodeSystemDevice.h"
#include "TorsionSolveOnDevice.h"
void TorsionSolveOnDevice(
NodeInfoVecs& nodeInfoVecs,
TorsionInfoVecs& torsionInfoVecs,
GeneralParams& generalParams) {
const double PI = 3.14159265358979323846;
if (generalParams.totalTorsionCount>0) {
thrust::counting_iterator<unsigned> startTorsionIter(0);
thrust::counting_iterator<unsigned> endTorsionIter(generalParams.totalTorsionCount);
//for_each guarantees order. This is needed for the iteration count and for saving to the torsion force vectors.
//forces are filled using 3 counters left = counter, center = counter + totalTorsionCount etc.
//Thus, in the force vector, only the first 3*totalTorsionCount entries are filled.
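//Resulting layout with N = totalTorsionCount:
// forceX/Y/Z = [ left_0..left_{N-1} | center_0..center_{N-1} | right_0..right_{N-1} ],
//which is why tempTorIndices below is filled with leftIndex, centerIndex and
//rightIndex in that same order before the sort and reduce_by_key over node ids.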
thrust::for_each(
thrust::make_zip_iterator(
thrust::make_tuple(
startTorsionIter,
torsionInfoVecs.leftIndex.begin(),
torsionInfoVecs.centerIndex.begin(),
torsionInfoVecs.rightIndex.begin(),
torsionInfoVecs.angleZero.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
startTorsionIter,
torsionInfoVecs.leftIndex.begin(),
torsionInfoVecs.centerIndex.begin(),
torsionInfoVecs.rightIndex.begin(),
torsionInfoVecs.angleZero.begin())) + generalParams.totalTorsionCount,
TorsionFunctor(
thrust::raw_pointer_cast(nodeInfoVecs.nodeLocX.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeLocY.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeLocZ.data()),
thrust::raw_pointer_cast(torsionInfoVecs.forceX.data()),
thrust::raw_pointer_cast(torsionInfoVecs.forceY.data()),
thrust::raw_pointer_cast(torsionInfoVecs.forceZ.data()),
thrust::raw_pointer_cast(nodeInfoVecs.isNodeFixed.data()),
generalParams.torsionStiffness,
generalParams.maxNodeCount,
generalParams.totalTorsionCount,
PI));
hipDeviceSynchronize();
//reduce by key to get forces. Notice leftIndex is 1/3rd the length of torsion.forceX.
//This vector will be sorted each iteration, so it needs to be recopied each time.
//The fill must end before the unset ids.
thrust::copy(torsionInfoVecs.leftIndex.begin(), torsionInfoVecs.leftIndex.begin() + generalParams.totalTorsionCount,
torsionInfoVecs.tempTorIndices.begin());
thrust::copy(torsionInfoVecs.centerIndex.begin(), torsionInfoVecs.centerIndex.begin() + generalParams.totalTorsionCount,
torsionInfoVecs.tempTorIndices.begin() + generalParams.totalTorsionCount);
thrust::copy(torsionInfoVecs.rightIndex.begin(), torsionInfoVecs.rightIndex.begin() + generalParams.totalTorsionCount,
torsionInfoVecs.tempTorIndices.begin() + 2 * generalParams.totalTorsionCount);
//key, then value. Each vector returns sorted
thrust::sort_by_key(torsionInfoVecs.tempTorIndices.begin(), torsionInfoVecs.tempTorIndices.begin() + 3 * generalParams.totalTorsionCount,
thrust::make_zip_iterator(
thrust::make_tuple(
torsionInfoVecs.forceX.begin(),
torsionInfoVecs.forceY.begin(),
torsionInfoVecs.forceZ.begin())), thrust::less<unsigned>());
thrust::fill(torsionInfoVecs.tempForceX.begin(), torsionInfoVecs.tempForceX.end(), 0);
thrust::fill(torsionInfoVecs.tempForceY.begin(), torsionInfoVecs.tempForceY.end(), 0);
thrust::fill(torsionInfoVecs.tempForceZ.begin(), torsionInfoVecs.tempForceZ.end(), 0);
thrust::fill(torsionInfoVecs.reducedIds.begin(), torsionInfoVecs.reducedIds.end(), 0);
unsigned endKey = thrust::get<0>(
thrust::reduce_by_key(
torsionInfoVecs.tempTorIndices.begin(),
torsionInfoVecs.tempTorIndices.begin() + 3*generalParams.totalTorsionCount,
thrust::make_zip_iterator(
thrust::make_tuple(
torsionInfoVecs.forceX.begin(),
torsionInfoVecs.forceY.begin(),
torsionInfoVecs.forceZ.begin())),
torsionInfoVecs.reducedIds.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(
torsionInfoVecs.tempForceX.begin(),
torsionInfoVecs.tempForceY.begin(),
torsionInfoVecs.tempForceZ.begin())),
thrust::equal_to<unsigned>(), CVec3Add())) - torsionInfoVecs.reducedIds.begin();//binary_pred, binary_op
hipDeviceSynchronize();
//std::cout<<"endkey: "<< endKey << std::endl;
//std::cout<<"totalTorsion: "<< generalParams.totalTorsionCount << std::endl;
thrust::for_each(
thrust::make_zip_iterator(//1st begin
thrust::make_tuple(
torsionInfoVecs.reducedIds.begin(),
torsionInfoVecs.tempForceX.begin(),
torsionInfoVecs.tempForceY.begin(),
torsionInfoVecs.tempForceZ.begin())),
thrust::make_zip_iterator(//1st end
thrust::make_tuple(
torsionInfoVecs.reducedIds.begin(),
torsionInfoVecs.tempForceX.begin(),
torsionInfoVecs.tempForceY.begin(),
torsionInfoVecs.tempForceZ.begin())) + endKey,
AddTorsionForceFunctor(
generalParams.maxNodeCount,
thrust::raw_pointer_cast(nodeInfoVecs.nodeForceX.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeForceY.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeForceZ.data())));
}
}
|
1a8199dec490ae235b23c848827890d087701260.cu
|
/*
* TorsionSolveOnDevice.cu
*
* Created on 11/7/2017
* Author: SRB
*/
#include "NodeSystemDevice.h"
#include "TorsionSolveOnDevice.h"
void TorsionSolveOnDevice(
NodeInfoVecs& nodeInfoVecs,
TorsionInfoVecs& torsionInfoVecs,
GeneralParams& generalParams) {
const double PI = 3.14159265358979323846;
if (generalParams.totalTorsionCount>0) {
thrust::counting_iterator<unsigned> startTorsionIter(0);
thrust::counting_iterator<unsigned> endTorsionIter(generalParams.totalTorsionCount);
//for_each guarantees order. This is needed for the iteration count and for saving to the torsion force vectors.
//forces are filled using 3 counters left = counter, center = counter + totalTorsionCount etc.
//Thus, in the force vector, only the first 3*totalTorsionCount entries are filled.
thrust::for_each(
thrust::make_zip_iterator(
thrust::make_tuple(
startTorsionIter,
torsionInfoVecs.leftIndex.begin(),
torsionInfoVecs.centerIndex.begin(),
torsionInfoVecs.rightIndex.begin(),
torsionInfoVecs.angleZero.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
startTorsionIter,
torsionInfoVecs.leftIndex.begin(),
torsionInfoVecs.centerIndex.begin(),
torsionInfoVecs.rightIndex.begin(),
torsionInfoVecs.angleZero.begin())) + generalParams.totalTorsionCount,
TorsionFunctor(
thrust::raw_pointer_cast(nodeInfoVecs.nodeLocX.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeLocY.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeLocZ.data()),
thrust::raw_pointer_cast(torsionInfoVecs.forceX.data()),
thrust::raw_pointer_cast(torsionInfoVecs.forceY.data()),
thrust::raw_pointer_cast(torsionInfoVecs.forceZ.data()),
thrust::raw_pointer_cast(nodeInfoVecs.isNodeFixed.data()),
generalParams.torsionStiffness,
generalParams.maxNodeCount,
generalParams.totalTorsionCount,
PI));
cudaThreadSynchronize();
//reduce by key to get forces. Notice leftIndex is 1/3rd the length of torsion.forceX.
//This vector will be sorted each iteration, so it needs to be recopied each time.
//The fill must end before the unset ids.
thrust::copy(torsionInfoVecs.leftIndex.begin(), torsionInfoVecs.leftIndex.begin() + generalParams.totalTorsionCount,
torsionInfoVecs.tempTorIndices.begin());
thrust::copy(torsionInfoVecs.centerIndex.begin(), torsionInfoVecs.centerIndex.begin() + generalParams.totalTorsionCount,
torsionInfoVecs.tempTorIndices.begin() + generalParams.totalTorsionCount);
thrust::copy(torsionInfoVecs.rightIndex.begin(), torsionInfoVecs.rightIndex.begin() + generalParams.totalTorsionCount,
torsionInfoVecs.tempTorIndices.begin() + 2 * generalParams.totalTorsionCount);
//key, then value. Each vector returns sorted
thrust::sort_by_key(torsionInfoVecs.tempTorIndices.begin(), torsionInfoVecs.tempTorIndices.begin() + 3 * generalParams.totalTorsionCount,
thrust::make_zip_iterator(
thrust::make_tuple(
torsionInfoVecs.forceX.begin(),
torsionInfoVecs.forceY.begin(),
torsionInfoVecs.forceZ.begin())), thrust::less<unsigned>());
thrust::fill(torsionInfoVecs.tempForceX.begin(), torsionInfoVecs.tempForceX.end(), 0);
thrust::fill(torsionInfoVecs.tempForceY.begin(), torsionInfoVecs.tempForceY.end(), 0);
thrust::fill(torsionInfoVecs.tempForceZ.begin(), torsionInfoVecs.tempForceZ.end(), 0);
thrust::fill(torsionInfoVecs.reducedIds.begin(), torsionInfoVecs.reducedIds.end(), 0);
unsigned endKey = thrust::get<0>(
thrust::reduce_by_key(
torsionInfoVecs.tempTorIndices.begin(),
torsionInfoVecs.tempTorIndices.begin() + 3*generalParams.totalTorsionCount,
thrust::make_zip_iterator(
thrust::make_tuple(
torsionInfoVecs.forceX.begin(),
torsionInfoVecs.forceY.begin(),
torsionInfoVecs.forceZ.begin())),
torsionInfoVecs.reducedIds.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(
torsionInfoVecs.tempForceX.begin(),
torsionInfoVecs.tempForceY.begin(),
torsionInfoVecs.tempForceZ.begin())),
thrust::equal_to<unsigned>(), CVec3Add())) - torsionInfoVecs.reducedIds.begin();//binary_pred, binary_op
cudaThreadSynchronize();
//std::cout<<"endkey: "<< endKey << std::endl;
//std::cout<<"totalTorsion: "<< generalParams.totalTorsionCount << std::endl;
thrust::for_each(
thrust::make_zip_iterator(//1st begin
thrust::make_tuple(
torsionInfoVecs.reducedIds.begin(),
torsionInfoVecs.tempForceX.begin(),
torsionInfoVecs.tempForceY.begin(),
torsionInfoVecs.tempForceZ.begin())),
thrust::make_zip_iterator(//1st end
thrust::make_tuple(
torsionInfoVecs.reducedIds.begin(),
torsionInfoVecs.tempForceX.begin(),
torsionInfoVecs.tempForceY.begin(),
torsionInfoVecs.tempForceZ.begin())) + endKey,
AddTorsionForceFunctor(
generalParams.maxNodeCount,
thrust::raw_pointer_cast(nodeInfoVecs.nodeForceX.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeForceY.data()),
thrust::raw_pointer_cast(nodeInfoVecs.nodeForceZ.data())));
}
}
|
889b55f0b0c7444a78a17cddbc7e0b0964ed70ff.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
 * @ image.hpp  (TODO)
 * @ 2019-4-29
 * @ 17:14
 */
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include "cuda_include/common.cuh"
#include "cuda_include/sharemem.cuh"
#include <cstdio>
template <typename T>
void gpu_cpu2zero1(T *cpu,T *gpu,size_t bytes)
{
memset(cpu, 0, bytes);
hipMemset(gpu,0,bytes);
}
/******************************************************************************************/
/// Color-fill kernels: measured timings per variant (block dims in brackets, ** marks the fastest).
/*
* kernel_fill_color 702.651us [32,4,1]
* kernel_fill_color3 705.469us [32,16,1]
* kernel_fill_color3_by_share 400.097us [32,4,1]
* kernel_fill_color15_by_share 253.638us [32,4,1]**
*/
/// Launch configuration for the kernel below:
/*
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x)/x,(h-1+y)/y,1);
* kernel_fill_color<T><<<grid,block>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColor(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
// bounds check
if(x<kFact_width&&y<kHeight)
{
int channels=idx%kChannels;
p_image[idx]=p_color[channels];
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_fill_color3<T><<<grid,block>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColor3(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
T local_color[4];
for(int i=0;i<kChannels;i++)
{
local_color[i]=p_color[i];
}
// bounds check covering all three strided writes
if((x+blockDim.x*2)<kFact_width&&y<kHeight)
{
int channels=idx%kChannels;
p_image[idx]=local_color[channels];
idx+=blockDim.x;
channels=idx%kChannels;
p_image[idx]=local_color[channels];
idx+=blockDim.x;
channels=idx%kChannels;
p_image[idx]=local_color[channels];
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_fill_color3_by_share<T><<<grid,block,colorbytes>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColorByShare3(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
sharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
int sidx=threadIdx.y*blockDim.x+threadIdx.x;
if(sidx<kChannels)data[sidx]=p_color[sidx];
__syncthreads();
// bounds check covering all three strided writes
if((x+blockDim.x*2)<kFact_width&&y<kHeight)
{
int channels;
for(int k=0;k<3;k++)
{
channels=idx%kChannels;
p_image[idx]=data[channels];
idx+=blockDim.x;
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x*15)/(x*15),(h-1+y)/y,1);
* kernel_fill_color15_by_share<T><<<grid,block,colorbytes>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColorByShare15(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*15;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
int sidx=threadIdx.y*blockDim.x+threadIdx.x;
if(sidx<kChannels)data[sidx]=p_color[sidx];
__syncthreads();
// bounds check (first of the 15 strided writes only)
if(x<kFact_width&&y<kHeight)
{
int channels;
for(int k=0;k<15;k++)
{
channels=idx%kChannels;
p_image[idx]=data[channels];
idx+=blockDim.x;
}
}
}
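/*
 * Illustrative host-side launcher (a sketch, not part of the original file)
 * for the fastest variant above, kernelFillColorByShare15, using the [32,4,1]
 * block size from the timing notes. The function name and pointer parameters
 * are assumptions; d_image and d_color must already live on the device and
 * d_color must hold kChannels entries.
 */
template <typename T>
void launchFillColorByShare15(T* d_image, T* d_color, int kFact_width, int kHeight, int kChannels)
{
    dim3 block(32, 4, 1);
    dim3 grid((kFact_width - 1 + 32 * 15) / (32 * 15), (kHeight - 1 + 4) / 4, 1);
    size_t color_bytes = kChannels * sizeof(T);   // dynamic shared memory for the color table
    hipLaunchKernelGGL((kernelFillColorByShare15<T>), dim3(grid), dim3(block), color_bytes, 0,
                       d_image, d_color, kFact_width, kHeight, kChannels);
}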
/// Channel-append kernels: pad every pixel with kNum_channels copies of a single value.
/*
* kernel_add_channels 1.131ms [32,4,1]
* kernel_add_channels_stride 507.197us [32,4,1]
* kernel_add_channels_stride2 422.649us [32,4,1]**
*/
/// Launch configuration for the kernel below:
/*
* dim3 block(x,y,1);
* dim3 grid((w*c_add-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,value);
*/
template <typename T>
__global__ void kernelAddChannel(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T value)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;// x index
int y=threadIdx.y+blockIdx.y*blockDim.y;// y index
int c_add=kChannels+kNum_channels;
int idx=y*kWidth*c_add+x;// linear index into the output image (kWidth * c_add per row)
if(x<kWidth*c_add&&y<kHeight)
{
int channels=idx%c_add;
int pixels=idx/c_add;
if (channels < kChannels) p_dst[idx] = p_src[pixels * kChannels + channels];
else p_dst[idx] = value;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels_stride<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,value);
*/
template <typename T>
__global__ void kernelAddChannelStride(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T value)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;// x index
int y = threadIdx.y + blockIdx.y * blockDim.y;// y index
int c_add=kChannels+kNum_channels;
int idx_out = y * kWidth * c_add + x * c_add;// first channel of this pixel in the output
int idx_in = y * kWidth * kChannels + x * kChannels;// first channel of this pixel in the input
if (x < kWidth && y < kHeight)
{
for (int i = 0; i <kChannels ; ++i) p_dst[idx_out+i]=p_src[idx_in+i];
for (int j = 0; j <kNum_channels ; ++j) p_dst[idx_out+kChannels+j]=value;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_add_channels_stride2<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,value);
*/
template <typename T>
__global__ void kernelAddChannelStride2(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T value)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x*2;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out=y*kWidth*c_add+x*c_add;//output index
    int idx_in=y*kWidth*kChannels+x*kChannels;//input index
if (x < kWidth && y < kHeight)
{
for (int i = 0; i <kChannels ; ++i)
{
p_dst[idx_out+i]=p_src[idx_in+i];
p_dst[idx_out+blockDim.x*c_add+i]=p_src[idx_in+blockDim.x*kChannels+i];
}
for (int j = 0; j <kNum_channels ; ++j) {
p_dst[idx_out + kChannels + j] = value;
p_dst[idx_out + blockDim.x * c_add + kChannels + j] = value;
}
}
}
///Function: add color channels (per-channel fill values)
/* kernel name                     elapsed time    block size
* kernel_add_channels 1.131ms [32,4,1]
* kernel_add_channels_stride 507.197us [32,4,1]
* kernel_add_channels_stride2 422.649us [32,4,1]**
*/
///Kernels
/*
* dim3 block(x,y,1);
* dim3 grid((w*c_add-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,d_value,_front_back);
*/
template <typename T>
__global__ void kernelAddChannels(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T * p_value,bool _front_back)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx=y*kWidth*c_add+x;//output index
if(x<kWidth*c_add&&y<kHeight)
{
int channels=idx%c_add;
int pixels=idx/c_add;
if(_front_back)
{
if (channels < kChannels) p_dst[idx] = p_src[pixels * kChannels + channels];
else p_dst[idx] = p_value[channels - kChannels];
}
else
{
if (channels < kNum_channels) p_dst[idx] = p_value[channels];
else p_dst[idx] = p_src[pixels * kChannels + channels - kNum_channels];
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels_stride<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,d_value,_front_back);
*/
template <typename T>
__global__ void kernelAddChannelsStride(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T * p_value,bool _front_back)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;//x coordinate index
    int y = threadIdx.y + blockIdx.y * blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out = y * kWidth * c_add + x * c_add;//output index
    int idx_in = y * kWidth * kChannels + x * kChannels;//input index
if (x < kWidth && y < kHeight)
{
if(_front_back)
{
for (int i = 0; i <kChannels ; ++i) p_dst[idx_out+i]=p_src[idx_in+i];
for (int j = 0; j <kNum_channels ; ++j) p_dst[idx_out+kChannels+j]=p_value[j];
}
else
{
for (int j = 0; j <kNum_channels ; ++j) p_dst[idx_out+j]=p_value[j];
for (int i = 0; i <kChannels ; ++i) p_dst[idx_out+kNum_channels+i]=p_src[idx_in+i];
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_add_channels_stride2<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,d_value,_front_back);
*/
template <typename T>
__global__ void kernelAddChannelsStride2(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T * p_value,bool _front_back)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x*2;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out=y*kWidth*c_add+x*c_add;//output index
    int idx_in=y*kWidth*kChannels+x*kChannels;//input index
if (x < kWidth && y < kHeight)
{
if(_front_back)
{
for (int i = 0; i <kChannels ; ++i)
{
p_dst[idx_out+i]=p_src[idx_in+i];
p_dst[idx_out+blockDim.x*c_add+i]=p_src[idx_in+blockDim.x*kChannels+i];
}
for (int j = 0; j <kNum_channels ; ++j)
{
p_dst[idx_out+kChannels+j]=p_value[j];
p_dst[idx_out+blockDim.x*c_add+kChannels+j]=p_value[j];
}
}
else
{
for (int j = 0; j <kNum_channels ; ++j)
{
p_dst[idx_out+j]=p_value[j];
p_dst[idx_out+blockDim.x*c_add+j]=p_value[j];
}
for (int i = 0; i <kChannels ; ++i)
{
p_dst[idx_out+kNum_channels+i]=p_src[idx_in+i];
p_dst[idx_out+blockDim.x*c_add+kNum_channels+i]=p_src[idx_in+blockDim.x*kChannels+i];
}
}
}
}
///Function: swap color channels
/* kernel name                     elapsed time    block size
* kernel_swap_channels 283.847us [32,4,1]**
* kernel_swap_channels2 293.352us [32,4,1]
*/
///Kernels
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_swap_channels<T><<<grid,block>>>(d_in,w,h,c,swap_c1,swap_c2);
*/
template <typename T>
__global__ void kernelSwapChannels(T *p_src,int const kWidth,int const kHeight,int const kChannels, int const kSwap_c1,int const kSwap_c2)
{
int const x=threadIdx.x+blockDim.x*blockIdx.x;
int const y=threadIdx.y+blockDim.y*blockIdx.y;
int const idx=y*kWidth+x;
if(x<kWidth&&y<kHeight)
{
T a,b;
a=p_src[idx*kChannels+kSwap_c1];
b=p_src[idx*kChannels+kSwap_c2];
p_src[idx*kChannels+kSwap_c1]=b;
p_src[idx*kChannels+kSwap_c2]=a;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y*2)/(y*2),1);
* kernel_swap_channels2<T><<<grid,block>>>(d_in,w,h,c,swap_c1,swap_c2);
*/
template <typename T>
__global__ void kernelSwapChannels2(T *p_src,int const kWidth,int const kHeight,int const kChannels, int const kSwap_c1,int const kSwap_c2)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y*2;
for(int i=0;i<2;i++)
{
        int idx=(y+blockDim.y*i)*kWidth+x;
if(x<kWidth&&(y+blockDim.y*i)<kHeight)
{
T a,b;
a=p_src[idx*kChannels+kSwap_c1];
b=p_src[idx*kChannels+kSwap_c2];
p_src[idx*kChannels+kSwap_c1]=b;
p_src[idx*kChannels+kSwap_c2]=a;
}
}
}
///Function: copy a color channel
/* kernel name                     elapsed time    block size
* kernel_copy_channels 286.692us [32,4,1]**
*/
///Kernels
/*
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_copy_channels<T><<<grid,block>>>(d_in,w,h,c,copy_c,paste_c);
*/
template <typename T>
__global__ void kernelCopyChannels(T *p_image,int const kWidth,int const kHeight,int const kChannels,int const kCopy_c,int const kPaste_c)
{
int x=blockDim.x*blockIdx.x+threadIdx.x;
int y=blockDim.y*blockIdx.y+threadIdx.y;
if(x<kWidth&&y<kHeight)
{
int idx=y*kWidth*kChannels+x*kChannels;
        T value=p_image[idx+kCopy_c];
p_image[idx+kPaste_c]=value;
}
}
///Function: delete a color channel
/* kernel name                     elapsed time    block size
* kernel_delete_channel 468.206us [32,4,1]
* kernel_delete_channel2 322.506us [32,2,1]**
* kernel_delete_channel3 334.987us [32,2,1]
*/
///Kernels
/*
* dim3 block(x,y,1);
* dim3 grid((src_w*dst_c-1+x*5)/(x*5),(src_h-1+y)/y,1);
* kernel_delete_channel<T><<<grid,block>>>(d_out,d_in,src_w,src_h,src_c,dst_c,del_c);
*/
template <typename T>
__global__ void kernelDeleteChannel(T *p_dst,T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kDst_c,int const kDel_c)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*5;
int y=threadIdx.y+blockIdx.y*blockDim.y;
for (int i = 0; i <5 ; ++i) {
if(x<kWidth*kDst_c&&y<kHeight)
{
int idx_out=y*kWidth*kDst_c+x;
int channel=idx_out%kDst_c;
int pixel=idx_out/kDst_c;
int idx_in=pixel*kChannels+channel;
T value;
if(channel>=kDel_c)idx_in+=1;
value=p_src[idx_in];
p_dst[idx_out]=value;
}
x+=blockDim.x;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((src_w-1+x*2)/(x*2),(src_h-1+y)/y,1);
* kernel_delete_channel2<T><<<grid,block>>>(d_out,d_in,src_w,src_h,src_c,dst_c,del_c);
*/
template <typename T>
__global__ void kernelDeleteChannel2(T *p_dst,T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kDst_c,int const kDel_c)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
if(x<kWidth&&y<kHeight)
{
int pixel=y*kWidth+x;
int pixel1=y*kWidth+x+blockDim.x;
T value;
int j=0;
for (int i = 0; i <kChannels ; ++i)
{
if(i!=kDel_c)
{
value=p_src[pixel*kChannels+i];
p_dst[pixel*kDst_c+j]=value;
value=p_src[pixel1*kChannels+i];
p_dst[pixel1*kDst_c+j]=value;
j++;
}
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((src_w-1+x*3)/(x*3),(src_h-1+y)/y,1);
* kernel_delete_channel3<T><<<grid,block>>>(d_out,d_in,src_w,src_h,src_c,dst_c,del_c);
*/
template <typename T>
__global__ void kernelDeleteChannel3(T *p_dst,T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kDst_c,int const kDel_c)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
if(x<kWidth&&y<kHeight)
{
int pixel=y*kWidth+x;
int pixel2=pixel+blockDim.x;
int pixel3=pixel2+blockDim.x;
T value;
int j=0;
for (int i = 0; i <kChannels ; ++i)
{
if(i!=kDel_c)
{
value=p_src[pixel*kChannels+i];
p_dst[pixel*kDst_c+j]=value;
value=p_src[pixel2*kChannels+i];
p_dst[pixel2*kDst_c+j]=value;
value=p_src[pixel3*kChannels+i];
p_dst[pixel3*kDst_c+j]=value;
j++;
}
}
}
}
/******************************************************************************************/
///Host-side wrappers that call the kernels
/******************************************************************************************/
///Fill-color function
template <typename T>
int fillColorCu(T *p_image,T *p_color,int const kWidth,int const kHeight,int const kChannels,int const kColor_size)
{
//bool flag= false;
if(kChannels!=kColor_size)
{
        std::cerr<<"the number of image channels does not match the color size"<<std::endl;
return 0;
}
int wc=kWidth*kChannels;
//
T *p_d_out=NULL;
T *p_d_color=NULL;
//
size_t const kImagebytes=kWidth*kHeight*kChannels*sizeof(T);
int const kColorbytes=kColor_size* sizeof(T);
//
hipMalloc((void**)&p_d_out ,kImagebytes);
hipMalloc((void**)&p_d_color,kColorbytes);
//cpu2gpu
hipMemcpy(p_d_color,p_color,kColorbytes,hipMemcpyHostToDevice);
//
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((wc-1+x*15)/(x*15),(kHeight-1+y)/y,1);
hipLaunchKernelGGL(( kernelFillColorByShare15<T>), dim3(grid),dim3(block),kColorbytes, 0, p_d_out,p_d_color,wc,kHeight,kChannels);
//gpu2cpu
hipMemcpy(p_image,p_d_out,kImagebytes,hipMemcpyDeviceToHost);
//
hipFree(p_d_out);
hipFree(p_d_color);
return 0;
}
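/* Hypothetical host-side usage sketch (buffer names and sizes are illustrative, not part of the original API):
 *   std::vector<float> image(640 * 480 * 3);
 *   float red[3] = {1.0f, 0.0f, 0.0f};
 *   fillColorCu<float>(image.data(), red, 640, 480, 3, 3);   // every pixel becomes red
 */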
///Add color channels (single fill value)
template <typename T>
int addChannelsCu(T *p_dst_image,T * p_src_image,int const kWidth,int const kHeight,int const kChannels, int const kNum_channels,T value)
{
if(kNum_channels<=0)
{
        std::cerr<<"the number of channels to add is less than 1"<<std::endl;
return 0;
}
    int const wc =kWidth*kChannels;//actual width of the input image (elements per row)
    int const wc_add=kWidth*(kChannels+kNum_channels);//actual width of the output image (elements per row)
//
size_t const kBytes_src=wc*kHeight* sizeof(T);
size_t const kBytes_dst=wc_add*kHeight* sizeof(T);
//
T *p_d_in=NULL,*p_d_out=NULL;
//
hipMalloc((void**)&p_d_in ,kBytes_src);
hipMalloc((void**)&p_d_out,kBytes_dst);
//cpu2gpu
hipMemcpy(p_d_in,p_src_image,kBytes_src,hipMemcpyHostToDevice);
//
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x*2)/(x*2),(kHeight-1+y)/y,1);
//
hipLaunchKernelGGL(( kernelAddChannelStride2<T>), dim3(grid),dim3(block), 0, 0, p_d_out,p_d_in,kWidth,kHeight,kChannels,kNum_channels,value);
//gpu2cpu
hipMemcpy(p_dst_image,p_d_out,kBytes_dst,hipMemcpyDeviceToHost);
///
hipFree(p_d_in);
hipFree(p_d_out);
return 0;
}
///Add color channels (per-channel fill values)
template <typename T>
int addChannelsCu(T *p_dst_image,T * p_src_image,int const kWidth,int const kHeight,int const kChannels, int const kNum_channels,T * p_value,bool _front_back=true)
{
if(kNum_channels<=0)
{
        std::cerr<<"the number of channels to add is less than 1"<<std::endl;
return 0;
}
    int const wc =kWidth*kChannels;//actual width of the input image (elements per row)
    int const wc_add=kWidth*(kChannels+kNum_channels);//actual width of the output image (elements per row)
//
size_t const kBytes_value=kNum_channels* sizeof(T);
size_t const kBytes_src =wc*kHeight* sizeof(T);
size_t const kBytes_dst =wc_add*kHeight* sizeof(T);
//
T *p_d_in=NULL,*p_d_out=NULL,*p_d_value=NULL;
//
hipMalloc((void**)&p_d_value,kBytes_value);
hipMalloc((void**)&p_d_in,kBytes_src);
hipMalloc((void**)&p_d_out,kBytes_dst);
//cpu2gpu
hipMemcpy(p_d_value,p_value,kBytes_value,hipMemcpyHostToDevice);
hipMemcpy(p_d_in,p_src_image,kBytes_src,hipMemcpyHostToDevice);
//
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x*2)/(x*2),(kHeight-1+y)/y,1);
//
hipLaunchKernelGGL(( kernelAddChannelsStride2<T>), dim3(grid),dim3(block), 0, 0, p_d_out,p_d_in,kWidth,kHeight,kChannels,kNum_channels,p_d_value,_front_back);
//gpu2cpu
hipMemcpy(p_dst_image,p_d_out,kBytes_dst,hipMemcpyDeviceToHost);
///
hipFree(p_d_in);
hipFree(p_d_out);
hipFree(p_d_value);
return 0;
}
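/* Hypothetical usage sketch (names are illustrative): append a constant alpha channel to an RGB image.
 *   std::vector<float> rgb(w * h * 3), rgba(w * h * 4);
 *   float alpha[1] = {1.0f};
 *   addChannelsCu<float>(rgba.data(), rgb.data(), w, h, 3, 1, alpha, true);   // true appends after the existing channels
 */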
///Swap color channels
template <typename T>
int swapChannelsByCu(T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kSwap_c1,int kSwap_c2)
{
if(kSwap_c1==kSwap_c2)return 0;
if(kSwap_c1<0||kSwap_c1>=kChannels||kSwap_c2<0||kSwap_c2>=kChannels)
{
        std::cerr<<"swapChannelsByCuda: the channel indices to swap are out of range!!"<<std::endl;
return 1;
}
//
size_t const kBytes=kWidth*kHeight*kChannels* sizeof(T);
//
T *p_d_in=NULL;
//
hipMalloc((void**)&p_d_in,kBytes);
//cpu2gpu
hipMemcpy(p_d_in,p_src,kBytes,hipMemcpyHostToDevice);
//
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x)/(x),(kHeight-1+y)/y,1);
//
hipLaunchKernelGGL(( kernelSwapChannels<T>), dim3(grid),dim3(block), 0, 0, p_d_in,kWidth,kHeight,kChannels,kSwap_c1,kSwap_c2);
//gpu2cpu
hipMemcpy(p_src,p_d_in,kBytes,hipMemcpyDeviceToHost);
//
hipFree(p_d_in);
return 0;
}
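/* Hypothetical usage sketch (names are illustrative): convert interleaved BGR data to RGB in place by swapping channels 0 and 2.
 *   swapChannelsByCu<float>(bgr.data(), w, h, 3, 0, 2);
 */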
///Copy a color channel
template <typename T>
int copyChannelsByCu(T *p_image,int const kWidth,int const kHeight,int const kChannels,int const kCopy_c,int const kPaste_c)
{
if(kCopy_c>=kChannels||kPaste_c>=kChannels)
{
        std::cerr<<"the requested channel index exceeds the image's channel count"<<std::endl;
return 1;
}
if(kCopy_c==kPaste_c)return 0;
if(kPaste_c<0)
{
        //TODO: append an all-zero color channel at the end
}
//
size_t const kBytes=kWidth*kHeight*kChannels* sizeof(T);
//
T *p_d_in=NULL;
//
hipMalloc(&p_d_in,kBytes);
//cpu2gpu
hipMemcpy(p_d_in,p_image,kBytes,hipMemcpyHostToDevice);
//
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x)/(x),(kHeight-1+y)/y,1);
//
hipLaunchKernelGGL(( kernelCopyChannels<T>), dim3(grid),dim3(block), 0, 0, p_d_in,kWidth,kHeight,kChannels,kCopy_c,kPaste_c);
//gpu2cpu
hipMemcpy(p_image,p_d_in,kBytes,hipMemcpyDeviceToHost);
//
hipFree(p_d_in);
return 0;
}
///Delete a color channel
template <typename T>
int deleteChannelByCu(T *p_dstImage,T *p_srcImage,int const kSrc_width,int const kSrc_height,int const kSrc_channels,int const kDel_channel)
{
if(kDel_channel<0||kDel_channel>=kSrc_channels)return 0;
    int dst_c=kSrc_channels-1;//number of output channels
//
size_t const kBytes_in=kSrc_width*kSrc_height*kSrc_channels* sizeof(T);
size_t const kBytes_out=kSrc_width*kSrc_height*dst_c* sizeof(T);
//
T *p_d_in=NULL;
T *p_d_out=NULL;
//
hipMalloc(&p_d_in ,kBytes_in);
hipMalloc(&p_d_out,kBytes_out);
//cpu2gpu
hipMemcpy(p_d_in,p_srcImage,kBytes_in,hipMemcpyHostToDevice);
//
int x=32;
int y=2;
dim3 block(x,y,1);
dim3 grid((kSrc_width-1+x*2)/(x*2),(kSrc_height-1+y)/y,1);
//
hipLaunchKernelGGL(( kernelDeleteChannel2<T>), dim3(grid),dim3(block), 0, 0, p_d_out,p_d_in,kSrc_width,kSrc_height,kSrc_channels,dst_c,kDel_channel);
//gpu2cpu
hipMemcpy(p_dstImage,p_d_out,kBytes_out,hipMemcpyDeviceToHost);
//
hipFree(p_d_in);
hipFree(p_d_out);
return 0;
}
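/* Hypothetical usage sketch (names are illustrative): strip the alpha channel from an RGBA image.
 *   std::vector<float> rgba(w * h * 4), rgb(w * h * 3);
 *   deleteChannelByCu<float>(rgb.data(), rgba.data(), w, h, 4, 3);   // channel 3 (alpha) is removed
 */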
/******************************************************************************************/
///Template instantiations of the host wrappers
/******************************************************************************************/
///Fill-color function
template <typename T>
int fillColorByCuda(T *image,T *color,int const w,int const h,int const c,int const color_size)
{
fillColorCu<T>(image,color,w,h,c, color_size);
return 0;
}
template <>
int fillColorByCuda(char *image,char *color,int const w,int const h,int const c,int const color_size)
{
fillColorCu<char>(image,color,w,h,c, color_size);
//compare1<char>(image,contrast,w*c,h, false);
return 0;
}
template <>
int fillColorByCuda(float *image,float *color,int const w,int const h,int const c,int const color_size)
{
fillColorCu<float>(image,color,w,h,c, color_size);
//compare1<float>(image,contrast,w*c,h, true);
return 0;
}
///Add color channels (appended at the back)
template <typename T>
int addChannelsByCuda(T *dst_image,T * src_image,int const w,int const h, int const c, int const num_channels,T value)
{
addChannelsCu(dst_image,src_image, w, h, c,num_channels,value);
return 0;
}
template <>
int addChannelsByCuda(char *dst_image,char * src_image,int const w,int const h, int const c, int const num_channels,char value)
{
addChannelsCu<char>(dst_image,src_image, w, h, c,num_channels,value);
return 0;
}
template <>
int addChannelsByCuda(float *dst_image,float * src_image,int const w,int const h, int const c, int const num_channels,float value)
{
addChannelsCu<float>(dst_image,src_image, w, h, c,num_channels,value);
//compare1<float>(dst_image,contrast,w*c,h, true);
return 0;
}
///Add color channels (front/back)
template <typename T>
int addChannelsFrontByCuda(T *dst_image,T * src_image,int const w,int const h, int const c, vector<T> value,bool _front_back)
{
addChannelsCu(dst_image,src_image, w, h, c,(int)value.size(),&value.at(0),_front_back);
//compare1(dst_image,contrast,w*c,h, false);
return 0;
}
template <>
int addChannelsFrontByCuda(char *dst_image,char * src_image,int const w,int const h, int const c, vector<char> value,bool _front_back)
{
addChannelsCu<char>(dst_image,src_image, w, h, c,(int)value.size(),&value.at(0),_front_back);
return 0;
}
template <>
int addChannelsFrontByCuda(float *dst_image,float * src_image,int const w,int const h, int const c, vector<float> value,bool _front_back)
{
addChannelsCu<float>(dst_image,src_image, w, h, c,(int)value.size(),&value.at(0),_front_back);
return 0;
}
///Swap color channels
template <typename T>
int swapChannelsByCuda(T *src,int const w,int const h,int c,int const swap_c1,int swap_c2)
{
return 0;
}
template <>
int swapChannelsByCuda(char *src,int const w,int const h,int c,int const swap_c1,int swap_c2)
{
swapChannelsByCu<char>(src,w,h,c,swap_c1,swap_c2);
//compare1<char>(src,contrast,w*c,h, false);
return 0;
}
template <>
int swapChannelsByCuda(float *src,int const w,int const h,int c,int const swap_c1,int swap_c2)
{
swapChannelsByCu<float>(src,w,h,c,swap_c1,swap_c2);
//compare1<float>(src,contrast,w*c,h, true);
return 0;
}
///Copy a color channel
template <typename T>
int copyChannelsByCuda(T *image,int const w,int const h,int const c,int const copy_c,int const paste_c)
{
return 0;
}
template <>
int copyChannelsByCuda(char *image,int const w,int const h,int const c,int const copy_c,int const paste_c)
{
copyChannelsByCu<char>(image,w,h,c,copy_c,paste_c);
return 0;
}
template <>
int copyChannelsByCuda(float *image,int const w,int const h,int const c,int const copy_c,int const paste_c)
{
copyChannelsByCu<float>(image,w,h,c,copy_c,paste_c);
return 0;
}
///Delete a color channel
template <typename T>
int deleteChannelByCuda(T *dstImage,T *srcImage,int const src_w,int const src_h,int const src_c,int const del_c)
{
return 0;
}
template <>
int deleteChannelByCuda(char *dstImage,char *srcImage,int const src_w,int const src_h,int const src_c,int const del_c)
{
deleteChannelByCu<char>(dstImage,srcImage,src_w,src_h,src_c,del_c);
return 0;
}
template <>
int deleteChannelByCuda(float *dstImage,float *srcImage,int const src_w,int const src_h,int const src_c,int const del_c)
{
deleteChannelByCu<float>(dstImage,srcImage,src_w,src_h,src_c,del_c);
return 0;
}
|
889b55f0b0c7444a78a17cddbc7e0b0964ed70ff.cu
|
/*
 * @Function  Implementation of the TODO functions in image.hpp
 * @Author    Yang Fengtuo
 * @Date      2019-4-29
 * @Time      17:14
 * @Email
*/
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include "cuda_include/common.cuh"
#include "cuda_include/sharemem.cuh"
#include <cstdio>
template <typename T>
void gpu_cpu2zero1(T *cpu,T *gpu,size_t bytes)
{
memset(cpu, 0, bytes);
cudaMemset(gpu,0,bytes);
}
/******************************************************************************************/
///Function: fill an image with a color
/* kernel name                     elapsed time    block size
* kernel_fill_color 702.651us [32,4,1]
* kernel_fill_color3 705.469us [32,16,1]
* kernel_fill_color3_by_share 400.097us [32,4,1]
* kernel_fill_color15_by_share 253.638us [32,4,1]**
*/
///Kernels
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x)/x,(h-1+y)/y,1);
* kernel_fill_color<T><<<grid,block>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColor(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
    //bounds check
if(x<kFact_width&&y<kHeight)
{
int channels=idx%kChannels;
p_image[idx]=p_color[channels];
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_fill_color3<T><<<grid,block>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColor3(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
T local_color[4];
for(int i=0;i<kChannels;i++)
{
local_color[i]=p_color[i];
}
    //bounds check
if((x+blockDim.x*2)<kFact_width&&y<kHeight)
{
int channels=idx%kChannels;
p_image[idx]=local_color[channels];
idx+=blockDim.x;
channels=idx%kChannels;
p_image[idx]=local_color[channels];
idx+=blockDim.x;
channels=idx%kChannels;
p_image[idx]=local_color[channels];
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_fill_color3_by_share<T><<<grid,block,colorbytes>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColorByShare3(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
sharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
int sidx=threadIdx.y*blockDim.x+threadIdx.x;
if(sidx<kChannels)data[sidx]=p_color[sidx];
__syncthreads();
    //bounds check
if((x+blockDim.x*2)<kFact_width&&y<kHeight)
{
int channels;
for(int k=0;k<3;k++)
{
channels=idx%kChannels;
p_image[idx]=data[channels];
idx+=blockDim.x;
}
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((kFact_width-1+x*15)/(x*15),(h-1+y)/y,1);
* kernel_fill_color15_by_share<T><<<grid,block,colorbytes>>>(d_out,d_color,kFact_width,h,c);
*/
template <typename T>
__global__ void kernelFillColorByShare15(T * p_image, T *p_color,int const kFact_width,int const kHeight,int const kChannels)
{
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*15;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kFact_width+x;
int sidx=threadIdx.y*blockDim.x+threadIdx.x;
if(sidx<kChannels)data[sidx]=p_color[sidx];
__syncthreads();
    //bounds check
if(x<kFact_width&&y<kHeight)
{
int channels;
for(int k=0;k<15;k++)
{
channels=idx%kChannels;
p_image[idx]=data[channels];
idx+=blockDim.x;
}
}
}
///Function: add color channels
/* kernel name                     elapsed time    block size
* kernel_add_channels 1.131ms [32,4,1]
* kernel_add_channels_stride 507.197us [32,4,1]
* kernel_add_channels_stride2 422.649us [32,4,1]**
*/
///Kernels
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c_add-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,value);
*/
template <typename T>
__global__ void kernelAddChannel(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T value)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx=y*kWidth*c_add+x;//output index
if(x<kWidth*c_add&&y<kHeight)
{
int channels=idx%c_add;
int pixels=idx/c_add;
if (channels < kChannels) p_dst[idx] = p_src[pixels * kChannels + channels];
else p_dst[idx] = value;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels_stride<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,value);
*/
template <typename T>
__global__ void kernelAddChannelStride(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T value)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;//x coordinate index
    int y = threadIdx.y + blockIdx.y * blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out = y * kWidth * c_add + x * c_add;//output index
    int idx_in = y * kWidth * kChannels + x * kChannels;//input index
if (x < kWidth && y < kHeight)
{
for (int i = 0; i <kChannels ; ++i) p_dst[idx_out+i]=p_src[idx_in+i];
for (int j = 0; j <kNum_channels ; ++j) p_dst[idx_out+kChannels+j]=value;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_add_channels_stride2<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,value);
*/
template <typename T>
__global__ void kernelAddChannelStride2(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T value)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x*2;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out=y*kWidth*c_add+x*c_add;//output index
    int idx_in=y*kWidth*kChannels+x*kChannels;//input index
if (x < kWidth && y < kHeight)
{
for (int i = 0; i <kChannels ; ++i)
{
p_dst[idx_out+i]=p_src[idx_in+i];
p_dst[idx_out+blockDim.x*c_add+i]=p_src[idx_in+blockDim.x*kChannels+i];
}
for (int j = 0; j <kNum_channels ; ++j) {
p_dst[idx_out + kChannels + j] = value;
p_dst[idx_out + blockDim.x * c_add + kChannels + j] = value;
}
}
}
///Function: add color channels (per-channel fill values)
/* kernel name                     elapsed time    block size
* kernel_add_channels 1.131ms [32,4,1]
* kernel_add_channels_stride 507.197us [32,4,1]
* kernel_add_channels_stride2 422.649us [32,4,1]**
*/
///Kernels
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c_add-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,d_value,_front_back);
*/
template <typename T>
__global__ void kernelAddChannels(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T * p_value,bool _front_back)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx=y*kWidth*c_add+x;//output index
if(x<kWidth*c_add&&y<kHeight)
{
int channels=idx%c_add;
int pixels=idx/c_add;
if(_front_back)
{
if (channels < kChannels) p_dst[idx] = p_src[pixels * kChannels + channels];
else p_dst[idx] = p_value[channels - kChannels];
}
else
{
if (channels < kNum_channels) p_dst[idx] = p_value[channels];
else p_dst[idx] = p_src[pixels * kChannels + channels - kNum_channels];
}
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_add_channels_stride<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,d_value,_front_back);
*/
template <typename T>
__global__ void kernelAddChannelsStride(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T * p_value,bool _front_back)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;//x coordinate index
    int y = threadIdx.y + blockIdx.y * blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out = y * kWidth * c_add + x * c_add;//output index
    int idx_in = y * kWidth * kChannels + x * kChannels;//input index
if (x < kWidth && y < kHeight)
{
if(_front_back)
{
for (int i = 0; i <kChannels ; ++i) p_dst[idx_out+i]=p_src[idx_in+i];
for (int j = 0; j <kNum_channels ; ++j) p_dst[idx_out+kChannels+j]=p_value[j];
}
else
{
for (int j = 0; j <kNum_channels ; ++j) p_dst[idx_out+j]=p_value[j];
for (int i = 0; i <kChannels ; ++i) p_dst[idx_out+kNum_channels+i]=p_src[idx_in+i];
}
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_add_channels_stride2<T><<<grid,block>>>(d_out,d_in,w,h,c,num_channels,d_value,_front_back);
*/
template <typename T>
__global__ void kernelAddChannelsStride2(T *p_dst,T *p_src, int const kWidth,int const kHeight,int const kChannels,int const kNum_channels,T * p_value,bool _front_back)
{
    int x=threadIdx.x+blockIdx.x*blockDim.x*2;//x coordinate index
    int y=threadIdx.y+blockIdx.y*blockDim.y;//y coordinate index
    int c_add=kChannels+kNum_channels;
    int idx_out=y*kWidth*c_add+x*c_add;//output index
    int idx_in=y*kWidth*kChannels+x*kChannels;//input index
if (x < kWidth && y < kHeight)
{
if(_front_back)
{
for (int i = 0; i <kChannels ; ++i)
{
p_dst[idx_out+i]=p_src[idx_in+i];
p_dst[idx_out+blockDim.x*c_add+i]=p_src[idx_in+blockDim.x*kChannels+i];
}
for (int j = 0; j <kNum_channels ; ++j)
{
p_dst[idx_out+kChannels+j]=p_value[j];
p_dst[idx_out+blockDim.x*c_add+kChannels+j]=p_value[j];
}
}
else
{
for (int j = 0; j <kNum_channels ; ++j)
{
p_dst[idx_out+j]=p_value[j];
p_dst[idx_out+blockDim.x*c_add+j]=p_value[j];
}
for (int i = 0; i <kChannels ; ++i)
{
p_dst[idx_out+kNum_channels+i]=p_src[idx_in+i];
p_dst[idx_out+blockDim.x*c_add+kNum_channels+i]=p_src[idx_in+blockDim.x*kChannels+i];
}
}
}
}
///Function: swap color channels
/* kernel name                     elapsed time    block size
* kernel_swap_channels 283.847us [32,4,1]**
* kernel_swap_channels2 293.352us [32,4,1]
*/
///Kernels
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_swap_channels<T><<<grid,block>>>(d_in,w,h,c,swap_c1,swap_c2);
*/
template <typename T>
__global__ void kernelSwapChannels(T *p_src,int const kWidth,int const kHeight,int const kChannels, int const kSwap_c1,int const kSwap_c2)
{
int const x=threadIdx.x+blockDim.x*blockIdx.x;
int const y=threadIdx.y+blockDim.y*blockIdx.y;
int const idx=y*kWidth+x;
if(x<kWidth&&y<kHeight)
{
T a,b;
a=p_src[idx*kChannels+kSwap_c1];
b=p_src[idx*kChannels+kSwap_c2];
p_src[idx*kChannels+kSwap_c1]=b;
p_src[idx*kChannels+kSwap_c2]=a;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y*2)/(y*2),1);
* kernel_swap_channels2<T><<<grid,block>>>(d_in,w,h,c,swap_c1,swap_c2);
*/
template <typename T>
__global__ void kernelSwapChannels2(T *p_src,int const kWidth,int const kHeight,int const kChannels, int const kSwap_c1,int const kSwap_c2)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y*2;
for(int i=0;i<2;i++)
{
        int idx=(y+blockDim.y*i)*kWidth+x;
if(x<kWidth&&(y+blockDim.y*i)<kHeight)
{
T a,b;
a=p_src[idx*kChannels+kSwap_c1];
b=p_src[idx*kChannels+kSwap_c2];
p_src[idx*kChannels+kSwap_c1]=b;
p_src[idx*kChannels+kSwap_c2]=a;
}
}
}
///Function: copy a color channel
/* kernel name                     elapsed time    block size
* kernel_copy_channels 286.692us [32,4,1]**
*/
///Kernels
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w-1+x)/(x),(h-1+y)/y,1);
* kernel_copy_channels<T><<<grid,block>>>(d_in,w,h,c,copy_c,paste_c);
*/
template <typename T>
__global__ void kernelCopyChannels(T *p_image,int const kWidth,int const kHeight,int const kChannels,int const kCopy_c,int const kPaste_c)
{
int x=blockDim.x*blockIdx.x+threadIdx.x;
int y=blockDim.y*blockIdx.y+threadIdx.y;
if(x<kWidth&&y<kHeight)
{
int idx=y*kWidth*kChannels+x*kChannels;
        T value=p_image[idx+kCopy_c];
p_image[idx+kPaste_c]=value;
}
}
///Function: delete a color channel
/* kernel name                     elapsed time    block size
* kernel_delete_channel 468.206us [32,4,1]
* kernel_delete_channel2 322.506us [32,2,1]**
* kernel_delete_channel3 334.987us [32,2,1]
*/
///Kernels
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((src_w*dst_c-1+x*5)/(x*5),(src_h-1+y)/y,1);
* kernel_delete_channel<T><<<grid,block>>>(d_out,d_in,src_w,src_h,src_c,dst_c,del_c);
*/
template <typename T>
__global__ void kernelDeleteChannel(T *p_dst,T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kDst_c,int const kDel_c)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*5;
int y=threadIdx.y+blockIdx.y*blockDim.y;
for (int i = 0; i <5 ; ++i) {
if(x<kWidth*kDst_c&&y<kHeight)
{
int idx_out=y*kWidth*kDst_c+x;
int channel=idx_out%kDst_c;
int pixel=idx_out/kDst_c;
int idx_in=pixel*kChannels+channel;
T value;
if(channel>=kDel_c)idx_in+=1;
value=p_src[idx_in];
p_dst[idx_out]=value;
}
x+=blockDim.x;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((src_w-1+x*2)/(x*2),(src_h-1+y)/y,1);
* kernel_delete_channel2<T><<<grid,block>>>(d_out,d_in,src_w,src_h,src_c,dst_c,del_c);
*/
template <typename T>
__global__ void kernelDeleteChannel2(T *p_dst,T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kDst_c,int const kDel_c)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
if(x<kWidth&&y<kHeight)
{
int pixel=y*kWidth+x;
int pixel1=y*kWidth+x+blockDim.x;
T value;
int j=0;
for (int i = 0; i <kChannels ; ++i)
{
if(i!=kDel_c)
{
value=p_src[pixel*kChannels+i];
p_dst[pixel*kDst_c+j]=value;
value=p_src[pixel1*kChannels+i];
p_dst[pixel1*kDst_c+j]=value;
j++;
}
}
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((src_w-1+x*3)/(x*3),(src_h-1+y)/y,1);
* kernel_delete_channel3<T><<<grid,block>>>(d_out,d_in,src_w,src_h,src_c,dst_c,del_c);
*/
template <typename T>
__global__ void kernelDeleteChannel3(T *p_dst,T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kDst_c,int const kDel_c)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
if(x<kWidth&&y<kHeight)
{
int pixel=y*kWidth+x;
int pixel2=pixel+blockDim.x;
int pixel3=pixel2+blockDim.x;
T value;
int j=0;
for (int i = 0; i <kChannels ; ++i)
{
if(i!=kDel_c)
{
value=p_src[pixel*kChannels+i];
p_dst[pixel*kDst_c+j]=value;
value=p_src[pixel2*kChannels+i];
p_dst[pixel2*kDst_c+j]=value;
value=p_src[pixel3*kChannels+i];
p_dst[pixel3*kDst_c+j]=value;
j++;
}
}
}
}
/******************************************************************************************/
///Host-side wrappers that call the kernels
/******************************************************************************************/
///Fill-color function
template <typename T>
int fillColorCu(T *p_image,T *p_color,int const kWidth,int const kHeight,int const kChannels,int const kColor_size)
{
//bool flag= false;
if(kChannels!=kColor_size)
{
        std::cerr<<"the number of image channels does not match the color size"<<std::endl;
return 0;
}
int wc=kWidth*kChannels;
//定义显存指针
T *p_d_out=NULL;
T *p_d_color=NULL;
//计算显存所需字节数
size_t const kImagebytes=kWidth*kHeight*kChannels*sizeof(T);
int const kColorbytes=kColor_size* sizeof(T);
//分配显存
cudaMalloc((void**)&p_d_out ,kImagebytes);
cudaMalloc((void**)&p_d_color,kColorbytes);
//cpu2gpu
cudaMemcpy(p_d_color,p_color,kColorbytes,cudaMemcpyHostToDevice);
    //grid and block configuration
int x=32;
int y=4;
dim3 block(x,y,1);
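    //grid.x rounds wc/(x*15) up so that each thread can fill 15 consecutive elements; e.g. wc=3000, x=32 gives (3000-1+480)/480 = 7 blocks in x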
dim3 grid((wc-1+x*15)/(x*15),(kHeight-1+y)/y,1);
kernelFillColorByShare15<T><<<grid,block,kColorbytes>>>(p_d_out,p_d_color,wc,kHeight,kChannels);
//gpu2cpu
cudaMemcpy(p_image,p_d_out,kImagebytes,cudaMemcpyDeviceToHost);
//释放显存
cudaFree(p_d_out);
cudaFree(p_d_color);
return 0;
}
///Add color channels (single fill value)
template <typename T>
int addChannelsCu(T *p_dst_image,T * p_src_image,int const kWidth,int const kHeight,int const kChannels, int const kNum_channels,T value)
{
if(kNum_channels<=0)
{
        std::cerr<<"the number of channels to add is less than 1"<<std::endl;
return 0;
}
    int const wc =kWidth*kChannels;//actual width of the input image (elements per row)
    int const wc_add=kWidth*(kChannels+kNum_channels);//actual width of the output image (elements per row)
//计算存储空间字节数
size_t const kBytes_src=wc*kHeight* sizeof(T);
size_t const kBytes_dst=wc_add*kHeight* sizeof(T);
//声明显存指针
T *p_d_in=NULL,*p_d_out=NULL;
//定义显存指针
cudaMalloc((void**)&p_d_in ,kBytes_src);
cudaMalloc((void**)&p_d_out,kBytes_dst);
//cpu2gpu
cudaMemcpy(p_d_in,p_src_image,kBytes_src,cudaMemcpyHostToDevice);
//网格划分
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x*2)/(x*2),(kHeight-1+y)/y,1);
//核函数
kernelAddChannelStride2<T><<<grid,block>>>(p_d_out,p_d_in,kWidth,kHeight,kChannels,kNum_channels,value);
//gpu2cpu
cudaMemcpy(p_dst_image,p_d_out,kBytes_dst,cudaMemcpyDeviceToHost);
///释放显存指针
cudaFree(p_d_in);
cudaFree(p_d_out);
return 0;
}
///Add color channels (per-channel fill values)
template <typename T>
int addChannelsCu(T *p_dst_image,T * p_src_image,int const kWidth,int const kHeight,int const kChannels, int const kNum_channels,T * p_value,bool _front_back=true)
{
if(kNum_channels<=0)
{
        std::cerr<<"the number of channels to add is less than 1"<<std::endl;
return 0;
}
    int const wc =kWidth*kChannels;//actual width of the input image (elements per row)
    int const wc_add=kWidth*(kChannels+kNum_channels);//actual width of the output image (elements per row)
//计算存储空间字节数
size_t const kBytes_value=kNum_channels* sizeof(T);
size_t const kBytes_src =wc*kHeight* sizeof(T);
size_t const kBytes_dst =wc_add*kHeight* sizeof(T);
//声明显存指针
T *p_d_in=NULL,*p_d_out=NULL,*p_d_value=NULL;
//定义显存指针
cudaMalloc((void**)&p_d_value,kBytes_value);
cudaMalloc((void**)&p_d_in,kBytes_src);
cudaMalloc((void**)&p_d_out,kBytes_dst);
//cpu2gpu
cudaMemcpy(p_d_value,p_value,kBytes_value,cudaMemcpyHostToDevice);
cudaMemcpy(p_d_in,p_src_image,kBytes_src,cudaMemcpyHostToDevice);
//网格划分
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x*2)/(x*2),(kHeight-1+y)/y,1);
//核函数
kernelAddChannelsStride2<T><<<grid,block>>>(p_d_out,p_d_in,kWidth,kHeight,kChannels,kNum_channels,p_d_value,_front_back);
//gpu2cpu
cudaMemcpy(p_dst_image,p_d_out,kBytes_dst,cudaMemcpyDeviceToHost);
///释放显存指针
cudaFree(p_d_in);
cudaFree(p_d_out);
cudaFree(p_d_value);
return 0;
}
///Swap color channels
template <typename T>
int swapChannelsByCu(T *p_src,int const kWidth,int const kHeight,int const kChannels,int const kSwap_c1,int kSwap_c2)
{
if(kSwap_c1==kSwap_c2)return 0;
if(kSwap_c1<0||kSwap_c1>=kChannels||kSwap_c2<0||kSwap_c2>=kChannels)
{
        std::cerr<<"swapChannelsByCuda: the channel indices to swap are out of range!!"<<std::endl;
return 1;
}
//计算字节数
size_t const kBytes=kWidth*kHeight*kChannels* sizeof(T);
//声明显存指针
T *p_d_in=NULL;
//定义显存指针
cudaMalloc((void**)&p_d_in,kBytes);
//cpu2gpu
cudaMemcpy(p_d_in,p_src,kBytes,cudaMemcpyHostToDevice);
//网格划分
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x)/(x),(kHeight-1+y)/y,1);
//核函数
kernelSwapChannels<T><<<grid,block>>>(p_d_in,kWidth,kHeight,kChannels,kSwap_c1,kSwap_c2);
//gpu2cpu
cudaMemcpy(p_src,p_d_in,kBytes,cudaMemcpyDeviceToHost);
//释放显存指针
cudaFree(p_d_in);
return 0;
}
///Copy a color channel
template <typename T>
int copyChannelsByCu(T *p_image,int const kWidth,int const kHeight,int const kChannels,int const kCopy_c,int const kPaste_c)
{
if(kCopy_c>=kChannels||kPaste_c>=kChannels)
{
        std::cerr<<"the requested channel index exceeds the image's channel count"<<std::endl;
return 1;
}
if(kCopy_c==kPaste_c)return 0;
if(kPaste_c<0)
{
        //TODO: append an all-zero color channel at the end
}
//计算字节数
size_t const kBytes=kWidth*kHeight*kChannels* sizeof(T);
//声明显存指针
T *p_d_in=NULL;
//定义显存指针
cudaMalloc(&p_d_in,kBytes);
//cpu2gpu
cudaMemcpy(p_d_in,p_image,kBytes,cudaMemcpyHostToDevice);
//网格划分
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth-1+x)/(x),(kHeight-1+y)/y,1);
//核函数
kernelCopyChannels<T><<<grid,block>>>(p_d_in,kWidth,kHeight,kChannels,kCopy_c,kPaste_c);
//gpu2cpu
cudaMemcpy(p_image,p_d_in,kBytes,cudaMemcpyDeviceToHost);
//释放显存指针
cudaFree(p_d_in);
return 0;
}
///Delete a color channel
template <typename T>
int deleteChannelByCu(T *p_dstImage,T *p_srcImage,int const kSrc_width,int const kSrc_height,int const kSrc_channels,int const kDel_channel)
{
if(kDel_channel<0||kDel_channel>=kSrc_channels)return 0;
    int dst_c=kSrc_channels-1;//number of output channels
//计算所需存储的字节数
size_t const kBytes_in=kSrc_width*kSrc_height*kSrc_channels* sizeof(T);
size_t const kBytes_out=kSrc_width*kSrc_height*dst_c* sizeof(T);
//声明显存指针
T *p_d_in=NULL;
T *p_d_out=NULL;
//定义显存指针
cudaMalloc(&p_d_in ,kBytes_in);
cudaMalloc(&p_d_out,kBytes_out);
//cpu2gpu
cudaMemcpy(p_d_in,p_srcImage,kBytes_in,cudaMemcpyHostToDevice);
//网格划分
int x=32;
int y=2;
dim3 block(x,y,1);
dim3 grid((kSrc_width-1+x*2)/(x*2),(kSrc_height-1+y)/y,1);
//核函数
kernelDeleteChannel2<T><<<grid,block>>>(p_d_out,p_d_in,kSrc_width,kSrc_height,kSrc_channels,dst_c,kDel_channel);
//gpu2cpu
cudaMemcpy(p_dstImage,p_d_out,kBytes_out,cudaMemcpyDeviceToHost);
//释放显存指针
cudaFree(p_d_in);
cudaFree(p_d_out);
return 0;
}
/******************************************************************************************/
///Template instantiations of the host wrappers
/******************************************************************************************/
///Fill-color function
template <typename T>
int fillColorByCuda(T *image,T *color,int const w,int const h,int const c,int const color_size)
{
fillColorCu<T>(image,color,w,h,c, color_size);
return 0;
}
template <>
int fillColorByCuda(char *image,char *color,int const w,int const h,int const c,int const color_size)
{
fillColorCu<char>(image,color,w,h,c, color_size);
//compare1<char>(image,contrast,w*c,h, false);
return 0;
}
template <>
int fillColorByCuda(float *image,float *color,int const w,int const h,int const c,int const color_size)
{
fillColorCu<float>(image,color,w,h,c, color_size);
//compare1<float>(image,contrast,w*c,h, true);
return 0;
}
///Add color channels (appended at the back)
template <typename T>
int addChannelsByCuda(T *dst_image,T * src_image,int const w,int const h, int const c, int const num_channels,T value)
{
addChannelsCu(dst_image,src_image, w, h, c,num_channels,value);
return 0;
}
template <>
int addChannelsByCuda(char *dst_image,char * src_image,int const w,int const h, int const c, int const num_channels,char value)
{
addChannelsCu<char>(dst_image,src_image, w, h, c,num_channels,value);
return 0;
}
template <>
int addChannelsByCuda(float *dst_image,float * src_image,int const w,int const h, int const c, int const num_channels,float value)
{
addChannelsCu<float>(dst_image,src_image, w, h, c,num_channels,value);
//compare1<float>(dst_image,contrast,w*c,h, true);
return 0;
}
///Add color channels (front/back)
template <typename T>
int addChannelsFrontByCuda(T *dst_image,T * src_image,int const w,int const h, int const c, vector<T> value,bool _front_back)
{
addChannelsCu(dst_image,src_image, w, h, c,(int)value.size(),&value.at(0),_front_back);
//compare1(dst_image,contrast,w*c,h, false);
return 0;
}
template <>
int addChannelsFrontByCuda(char *dst_image,char * src_image,int const w,int const h, int const c, vector<char> value,bool _front_back)
{
addChannelsCu<char>(dst_image,src_image, w, h, c,(int)value.size(),&value.at(0),_front_back);
return 0;
}
template <>
int addChannelsFrontByCuda(float *dst_image,float * src_image,int const w,int const h, int const c, vector<float> value,bool _front_back)
{
addChannelsCu<float>(dst_image,src_image, w, h, c,(int)value.size(),&value.at(0),_front_back);
return 0;
}
///Swap color channels
template <typename T>
int swapChannelsByCuda(T *src,int const w,int const h,int c,int const swap_c1,int swap_c2)
{
return 0;
}
template <>
int swapChannelsByCuda(char *src,int const w,int const h,int c,int const swap_c1,int swap_c2)
{
swapChannelsByCu<char>(src,w,h,c,swap_c1,swap_c2);
//compare1<char>(src,contrast,w*c,h, false);
return 0;
}
template <>
int swapChannelsByCuda(float *src,int const w,int const h,int c,int const swap_c1,int swap_c2)
{
swapChannelsByCu<float>(src,w,h,c,swap_c1,swap_c2);
//compare1<float>(src,contrast,w*c,h, true);
return 0;
}
///Copy a color channel
template <typename T>
int copyChannelsByCuda(T *image,int const w,int const h,int const c,int const copy_c,int const paste_c)
{
return 0;
}
template <>
int copyChannelsByCuda(char *image,int const w,int const h,int const c,int const copy_c,int const paste_c)
{
copyChannelsByCu<char>(image,w,h,c,copy_c,paste_c);
return 0;
}
template <>
int copyChannelsByCuda(float *image,int const w,int const h,int const c,int const copy_c,int const paste_c)
{
copyChannelsByCu<float>(image,w,h,c,copy_c,paste_c);
return 0;
}
///Delete a color channel
template <typename T>
int deleteChannelByCuda(T *dstImage,T *srcImage,int const src_w,int const src_h,int const src_c,int const del_c)
{
return 0;
}
template <>
int deleteChannelByCuda(char *dstImage,char *srcImage,int const src_w,int const src_h,int const src_c,int const del_c)
{
deleteChannelByCu<char>(dstImage,srcImage,src_w,src_h,src_c,del_c);
return 0;
}
template <>
int deleteChannelByCuda(float *dstImage,float *srcImage,int const src_w,int const src_h,int const src_c,int const del_c)
{
deleteChannelByCu<float>(dstImage,srcImage,src_w,src_h,src_c,del_c);
return 0;
}
|
2b9151c74955096e7b02de8987b8480788c27bd0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Includes
#include "CudaMath.h"
#include "Cloth.h"
// Computes the impacts between two points that are connected by a constraint in order to satisfy the constraint a little better.
__device__ float3 computeImpact(float3 me, float3 other, float stepsize, float h)
{
const float aimedDistance = 1.0 / (float)RESOLUTION_X;
float3 dir = other-me;
float ldir = length(dir);
if (ldir==0) return dir;
float e = (ldir - aimedDistance) * 0.5;
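    // e is half the constraint violation (the correction is shared by the two endpoints); dividing by h*h cancels the h*h applied during integration, so stepsize directly scales the positional correction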
return dir/ldir * e / (h*h) * stepsize;
}
// Simple collision detection against a sphere at (0,0,0) with radius SPHERE_RADIUS and skin width SKIN_WIDTH.
__device__ float3 sphereCollision(float3 p, float h)
{
    // TODO: Test whether the point is inside the sphere. If so, compute an impulse that moves it back out.
float3 impulse = make_float3(0,0,0);
float dist = length(p);
if(dist < (SPHERE_RADIUS + SKIN_WIDTH))
{
float3 s = normalize(p) * ((SPHERE_RADIUS + SKIN_WIDTH)-dist);
impulse = s/(h*h);
}
return impulse;
}
// -----------------------------------------------------------------------------------------------
// Sum up the impulses exerted by the neighboring grid points.
// impacts += ...
__global__ void computeImpacts(float3* oldPos, float3* impacts, float stepsize, float h)
{
    // TODO: Read the positions of the neighboring grid points and of this grid point.
// own position:
int index = 0;
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
int line = RESOLUTION_Y;
//index = Y + X * line;
index = Y + X * RESOLUTION_Y;
// neighbors (4-connected)
int indexN0 = (Y > 0)? Y-1 + X * line : -1;
int indexN1 = (X > 0)? Y + (X-1) * line : -1;
int indexN2 = (Y < line-1)? Y+1 + X * line : -1;
int indexN3 = (X < blockDim.x * gridDim.x-1)? Y + (X+1) * line : -1;
    // TODO: Perform collision handling against the sphere.
//oldPos[index] = sphereCollision(oldPos[index], h);
    // TODO: There is a constraint with every neighbor. Accordingly, call computeImpact
    // for each neighbor and sum up the results.
float3 impact = sphereCollision(oldPos[index], h);
// neighbors also mustn't enter the sphere
/*impact += (indexN0 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN0], h), stepsize, h);
impact += (indexN1 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN1], h), stepsize, h);
impact += (indexN2 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN2], h), stepsize, h);
impact += (indexN3 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN3], h), stepsize, h);
*/
if(indexN0 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN0], stepsize, h);
if(indexN1 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN1], stepsize, h);
if(indexN2 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN2], stepsize, h);
if(indexN3 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN3], stepsize, h);
    // TODO: Add the sum of the impulses to this grid point's entry in "impacts".
impacts[index] += impact;
}
// -----------------------------------------------------------------------------------------------
// Preview-Step
// newpos = oldpos + (velocity + impacts * h) * h
__global__ void previewSteps( float3* newPos, float3* oldPos, float3* impacts, float3* velocity,
float h)
{
    // TODO: Compute where we would be if we integrated from the previous position
    // with the previous velocity and the new impulses.
int index = 0;
    // compute the index
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
//index = Y + X * blockDim.y * gridDim.y;
index = Y + X * RESOLUTION_Y;
// TODO: don't check this here but find a proper way to not call it at all? ###########################################################################################
//if( Y == blockDim.y*gridDim.y-1)
// return;
newPos[index] = oldPos[index] + (velocity[index] + impacts[index] * h) * h;
}
// -----------------------------------------------------------------------------------------------
// Integrate velocity
// velocity = velocity * LINEAR_DAMPING + (impacts - (0,GRAVITY,0)) * h
__global__ void integrateVelocity( float3* impacts, float3* velocity, float h)
{
int index = 0;
    // compute the index
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
index = Y + X * RESOLUTION_Y;
// TODO: Update velocity.
velocity[index] = velocity[index] * LINEAR_DAMPING + (impacts[index] - make_float3(0,GRAVITY,0)) * h;
}
// -----------------------------------------------------------------------------------------------
// Test function that only exists so that something becomes visible once the VBOs are mapped...
__global__ void test( float3* newPos, float3* oldPos, float h)
{
int index = 0;
    // compute the index
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
index = Y + X * blockDim.y * gridDim.y;
// TODO: don't check this here but find a proper way to not call it at all? ###########################################################################################
if( Y == blockDim.y*gridDim.y-1)
return;
newPos[index] = oldPos[index] + make_float3(0, -h/20, 0);
newPos[index] = sphereCollision(newPos[index], h);
}
// -----------------------------------------------------------------------------------------------
void updateCloth( float3* newPos, float3* oldPos, float3* impacts, float3* velocity,
float h, float stepsize)
{
dim3 blocks(RESOLUTION_X, RESOLUTION_Y-1, 1);
// -----------------------------
// Clear impacts
hipMemset(impacts, 0, RESOLUTION_X*RESOLUTION_Y*sizeof(float3));
// Updating constraints is an iterative process.
    // The more iterations we apply, the stiffer the cloth becomes.
for (int i=0; i<10; ++i)
{
// -----------------------------
        // TODO: Call the previewSteps kernel (predict where the grid points would land with the current impulses.)
// newpos = oldpos + (velocity + impacts * h) * h
//previewSteps<<<blocks,1>>>(newPos, oldPos, impacts, velocity, h);
hipLaunchKernelGGL(( previewSteps), dim3(dim3(RESOLUTION_X,RESOLUTION_Y-1, 1)),dim3(1), 0, 0, newPos, oldPos, impacts, velocity, h);
// -----------------------------
        // TODO: Call the computeImpacts kernel (recompute the impulses so that the constraints are satisfied more closely.)
// impacts += ...
hipLaunchKernelGGL(( computeImpacts), dim3(blocks),dim3(1), 0, 0, newPos, impacts, stepsize, h);
}
    // call the test function to see if the mapping has worked
//test<<<blocks, 1>>>(newPos, oldPos, h);
// -----------------------------
    // TODO: Approximate the normals
// -----------------------------
    // TODO: Run the integrate-velocity kernel
    // The kernel computes: velocity = velocity * LINEAR_DAMPING + (impacts - (0,GRAVITY,0)) * h
hipLaunchKernelGGL(( integrateVelocity), dim3(blocks),dim3(1), 0, 0, impacts, velocity, h);
}
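/* Hypothetical per-frame driver sketch (illustrative only; device buffer allocation and VBO mapping are omitted):
 *   float h = 1.0f / 60.0f;     // integration time step
 *   float stepsize = 0.3f;      // constraint relaxation factor forwarded to computeImpact
 *   updateCloth(d_newPos, d_oldPos, d_impacts, d_velocity, h, stepsize);
 */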
|
2b9151c74955096e7b02de8987b8480788c27bd0.cu
|
// Includes
#include "CudaMath.h"
#include "Cloth.h"
// Computes the impacts between two points that are connected by a constraint in order to satisfy the constraint a little better.
__device__ float3 computeImpact(float3 me, float3 other, float stepsize, float h)
{
const float aimedDistance = 1.0 / (float)RESOLUTION_X;
float3 dir = other-me;
float ldir = length(dir);
if (ldir==0) return dir;
float e = (ldir - aimedDistance) * 0.5;
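    // e is half the constraint violation (the correction is shared by the two endpoints); dividing by h*h cancels the h*h applied during integration, so stepsize directly scales the positional correction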
return dir/ldir * e / (h*h) * stepsize;
}
// Simple collision detection against a sphere at (0,0,0) with radius SPHERE_RADIUS and skin width SKIN_WIDTH.
__device__ float3 sphereCollision(float3 p, float h)
{
    // TODO: Test whether the point is inside the sphere. If so, compute an impulse that moves it back out.
float3 impulse = make_float3(0,0,0);
float dist = length(p);
if(dist < (SPHERE_RADIUS + SKIN_WIDTH))
{
float3 s = normalize(p) * ((SPHERE_RADIUS + SKIN_WIDTH)-dist);
impulse = s/(h*h);
}
return impulse;
}
// -----------------------------------------------------------------------------------------------
// Sum up the impulses exerted by the neighboring grid points.
// impacts += ...
__global__ void computeImpacts(float3* oldPos, float3* impacts, float stepsize, float h)
{
    // TODO: Read the positions of the neighboring grid points and of this grid point.
// own position:
int index = 0;
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
int line = RESOLUTION_Y;
//index = Y + X * line;
index = Y + X * RESOLUTION_Y;
// neighbors (4-connected)
int indexN0 = (Y > 0)? Y-1 + X * line : -1;
int indexN1 = (X > 0)? Y + (X-1) * line : -1;
int indexN2 = (Y < line-1)? Y+1 + X * line : -1;
int indexN3 = (X < blockDim.x * gridDim.x-1)? Y + (X+1) * line : -1;
    // TODO: Perform collision handling against the sphere.
//oldPos[index] = sphereCollision(oldPos[index], h);
    // TODO: There is a constraint with every neighbor. Accordingly, call computeImpact
    // for each neighbor and sum up the results.
float3 impact = sphereCollision(oldPos[index], h);
// neighbors also mustn't enter the sphere
/*impact += (indexN0 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN0], h), stepsize, h);
impact += (indexN1 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN1], h), stepsize, h);
impact += (indexN2 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN2], h), stepsize, h);
impact += (indexN3 == -1)? zeroVec : computeImpact(oldPos[index], sphereCollision(oldPos[indexN3], h), stepsize, h);
*/
if(indexN0 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN0], stepsize, h);
if(indexN1 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN1], stepsize, h);
if(indexN2 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN2], stepsize, h);
if(indexN3 != -1)
impact += computeImpact(oldPos[index], oldPos[indexN3], stepsize, h);
    // TODO: Add the sum of the impulses to this grid point's entry in "impacts".
impacts[index] += impact;
}
// -----------------------------------------------------------------------------------------------
// Preview-Step
// newpos = oldpos + (velocity + impacts * h) * h
__global__ void previewSteps( float3* newPos, float3* oldPos, float3* impacts, float3* velocity,
float h)
{
    // TODO: Compute where we would be if we integrated from the previous position
    // with the previous velocity and the new impulses.
int index = 0;
    // compute the index
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
//index = Y + X * blockDim.y * gridDim.y;
index = Y + X * RESOLUTION_Y;
// TODO: don't check this here but find a proper way to not call it at all? ###########################################################################################
//if( Y == blockDim.y*gridDim.y-1)
// return;
newPos[index] = oldPos[index] + (velocity[index] + impacts[index] * h) * h;
}
// -----------------------------------------------------------------------------------------------
// Integrate velocity
// velocity = velocity * LINEAR_DAMPING + (impacts - (0,GRAVITY,0)) * h
__global__ void integrateVelocity( float3* impacts, float3* velocity, float h)
{
int index = 0;
    // compute the index
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
index = Y + X * RESOLUTION_Y;
// TODO: Update velocity.
velocity[index] = velocity[index] * LINEAR_DAMPING + (impacts[index] - make_float3(0,GRAVITY,0)) * h;
}
// -----------------------------------------------------------------------------------------------
// Test function that only exists so that something becomes visible once the VBOs are mapped...
__global__ void test( float3* newPos, float3* oldPos, float h)
{
int index = 0;
    // compute the index
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
index = Y + X * blockDim.y * gridDim.y;
// TODO: don't check this here but find a proper way to not call it at all? ###########################################################################################
if( Y == blockDim.y*gridDim.y-1)
return;
newPos[index] = oldPos[index] + make_float3(0, -h/20, 0);
newPos[index] = sphereCollision(newPos[index], h);
}
// -----------------------------------------------------------------------------------------------
void updateCloth( float3* newPos, float3* oldPos, float3* impacts, float3* velocity,
float h, float stepsize)
{
dim3 blocks(RESOLUTION_X, RESOLUTION_Y-1, 1);
// -----------------------------
// Clear impacts
cudaMemset(impacts, 0, RESOLUTION_X*RESOLUTION_Y*sizeof(float3));
// Updating constraints is an iterative process.
    // The more iterations we apply, the stiffer the cloth becomes.
for (int i=0; i<10; ++i)
{
// -----------------------------
        // TODO: Call the previewSteps kernel (predict where the grid points would land with the current impulses.)
// newpos = oldpos + (velocity + impacts * h) * h
//previewSteps<<<blocks,1>>>(newPos, oldPos, impacts, velocity, h);
previewSteps<<<dim3(RESOLUTION_X,RESOLUTION_Y-1, 1),1>>>(newPos, oldPos, impacts, velocity, h);
// -----------------------------
        // TODO: Call the computeImpacts kernel (recompute the impulses so that the constraints are satisfied more closely.)
// impacts += ...
computeImpacts<<<blocks,1>>>(newPos, impacts, stepsize, h);
}
    // call the test function to see if the mapping has worked
//test<<<blocks, 1>>>(newPos, oldPos, h);
// -----------------------------
    // TODO: Approximate the normals
// -----------------------------
    // TODO: Run the integrate-velocity kernel
    // The kernel computes: velocity = velocity * LINEAR_DAMPING + (impacts - (0,GRAVITY,0)) * h
integrateVelocity<<<blocks,1>>>(impacts, velocity, h);
}
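/* Hypothetical per-frame driver sketch (illustrative only; device buffer allocation and VBO mapping are omitted):
 *   float h = 1.0f / 60.0f;     // integration time step
 *   float stepsize = 0.3f;      // constraint relaxation factor forwarded to computeImpact
 *   updateCloth(d_newPos, d_oldPos, d_impacts, d_velocity, h, stepsize);
 */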
|
f47422e9ccd6e32b3fae174f50afdf77f2e1d282.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/algorithms.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/prims/reduce_op.cuh>
#include <cugraph/prims/update_frontier_v_push_if_out_nbr.cuh>
#include <cugraph/prims/vertex_frontier.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/vertex_partition_device_view.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <thrust/fill.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/optional.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
#include <type_traits>
namespace cugraph {
namespace experimental {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void bfs(raft::handle_t const& handle,
GraphViewType const& push_graph_view,
typename GraphViewType::vertex_type* distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
bool direction_optimizing,
typename GraphViewType::vertex_type depth_limit,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
if (num_vertices == 0) { return; }
// 1. check input arguments
CUGRAPH_EXPECTS(
push_graph_view.is_symmetric() || !direction_optimizing,
"Invalid input argument: input graph should be symmetric for direction optimizing BFS.");
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
if (do_expensive_check) {
// nothing to do
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<vertex_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = vertex_t{0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
// 3. initialize BFS frontier
enum class Bucket { cur, next, num_buckets };
VertexFrontier<vertex_t,
void,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle);
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).insert(source_vertex);
}
// 4. BFS iteration
vertex_t depth{0};
while (true) {
if (direction_optimizing) {
CUGRAPH_FAIL("unimplemented.");
} else {
auto vertex_partition = vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>(
push_graph_view.get_vertex_partition_view());
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier,
static_cast<size_t>(Bucket::cur),
std::vector<size_t>{static_cast<size_t>(Bucket::next)},
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
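      // edge op: push src as dst's predecessor candidate only if dst has not been discovered yet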
[vertex_partition, distances] __device__(
vertex_t src, vertex_t dst, auto src_val, auto dst_val) {
auto push = true;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto distance =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst));
if (distance != invalid_distance) { push = false; }
}
return push ? thrust::optional<vertex_t>{src} : thrust::nullopt;
},
reduce_op::any<vertex_t>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
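      // vertex op: a newly reached vertex gets distance depth + 1 and moves to the next-frontier bucket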
[depth] __device__(auto v, auto v_val, auto pushed_val) {
return (v_val == invalid_distance)
? thrust::optional<
thrust::tuple<size_t, thrust::tuple<vertex_t, vertex_t>>>{thrust::make_tuple(
static_cast<size_t>(Bucket::next),
thrust::make_tuple(depth + 1, pushed_val))}
: thrust::nullopt;
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear();
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit();
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur),
static_cast<size_t>(Bucket::next));
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) {
break;
}
}
depth++;
if (depth >= depth_limit) { break; }
}
CUDA_TRY(hipStreamSynchronize(
    handle.get_stream())); // this is necessary as vertex_frontier will become out-of-scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void bfs(raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const& graph_view,
vertex_t* distances,
vertex_t* predecessors,
vertex_t source_vertex,
bool direction_optimizing,
vertex_t depth_limit,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::bfs(handle,
graph_view,
distances,
predecessors,
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
} else {
detail::bfs(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
}
}
// explicit instantiation
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
f47422e9ccd6e32b3fae174f50afdf77f2e1d282.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/algorithms.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/prims/reduce_op.cuh>
#include <cugraph/prims/update_frontier_v_push_if_out_nbr.cuh>
#include <cugraph/prims/vertex_frontier.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/vertex_partition_device_view.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <thrust/fill.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/optional.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
#include <type_traits>
namespace cugraph {
namespace experimental {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void bfs(raft::handle_t const& handle,
GraphViewType const& push_graph_view,
typename GraphViewType::vertex_type* distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
bool direction_optimizing,
typename GraphViewType::vertex_type depth_limit,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
if (num_vertices == 0) { return; }
// 1. check input arguments
CUGRAPH_EXPECTS(
push_graph_view.is_symmetric() || !direction_optimizing,
"Invalid input argument: input graph should be symmetric for direction optimizing BFS.");
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
if (do_expensive_check) {
// nothing to do
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<vertex_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = vertex_t{0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
// 3. initialize BFS frontier
enum class Bucket { cur, next, num_buckets };
VertexFrontier<vertex_t,
void,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle);
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).insert(source_vertex);
}
// 4. BFS iteration
vertex_t depth{0};
while (true) {
if (direction_optimizing) {
CUGRAPH_FAIL("unimplemented.");
} else {
auto vertex_partition = vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>(
push_graph_view.get_vertex_partition_view());
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier,
static_cast<size_t>(Bucket::cur),
std::vector<size_t>{static_cast<size_t>(Bucket::next)},
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
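      // edge op: push src as dst's predecessor candidate only if dst has not been discovered yet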
[vertex_partition, distances] __device__(
vertex_t src, vertex_t dst, auto src_val, auto dst_val) {
auto push = true;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto distance =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst));
if (distance != invalid_distance) { push = false; }
}
return push ? thrust::optional<vertex_t>{src} : thrust::nullopt;
},
reduce_op::any<vertex_t>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
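      // vertex op: a newly reached vertex gets distance depth + 1 and moves to the next-frontier bucket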
[depth] __device__(auto v, auto v_val, auto pushed_val) {
return (v_val == invalid_distance)
? thrust::optional<
thrust::tuple<size_t, thrust::tuple<vertex_t, vertex_t>>>{thrust::make_tuple(
static_cast<size_t>(Bucket::next),
thrust::make_tuple(depth + 1, pushed_val))}
: thrust::nullopt;
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear();
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit();
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur),
static_cast<size_t>(Bucket::next));
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) {
break;
}
}
depth++;
if (depth >= depth_limit) { break; }
}
CUDA_TRY(cudaStreamSynchronize(
    handle.get_stream())); // this is necessary as vertex_frontier will become out-of-scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void bfs(raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const& graph_view,
vertex_t* distances,
vertex_t* predecessors,
vertex_t source_vertex,
bool direction_optimizing,
vertex_t depth_limit,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::bfs(handle,
graph_view,
distances,
predecessors,
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
} else {
detail::bfs(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
}
}
// explicit instantiation
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
666f803d2c971a85bbc515667e9f3fb602b3727d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
#define GpuErrorCheck(ans) { GpuAssert((ans), __FILE__, __LINE__); }
inline void GpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#if 0
__global__ void prefixScan(int* num, int n)
{
int i = threadIdx.x;
// Bottom-up
//
// thread id
// 1 0
// array
// 0 1 2 3
// 0 0+1 2 2+3
// 0 0+1 2 0+1+2+3
for (int step = 1; step < n; step *= 2)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
num[rightIdx] += num[leftIdx];
}
}
// Top-down
//
// thread id
// 1 0
// array
// 0 0+1 2 "0"
// 0 "0" 2 0+1
// "0" 0 0+1 0+1+2
if (i == 0)
{
num[n - 1] = 0;
}
for (int step = n >> 1; step >= 1; step >>= 1)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
int left = num[leftIdx];
int right = num[rightIdx];
num[leftIdx] = right;
num[rightIdx] = left + right;
}
}
}
extern __shared__ int lds[];
__global__ void prefixScanLds(int* num, int n)
{
int i = threadIdx.x;
lds[i * 2] = num[i * 2];
lds[i * 2 + 1] = num[i * 2 + 1];
for (int step = 1; step < n; step *= 2)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
}
if (i == 0)
{
lds[n - 1] = 0;
}
for (int step = n >> 1; step >= 1; step >>= 1)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
}
__syncthreads();
num[i * 2] = lds[i * 2];
num[i * 2 + 1] = lds[i * 2 + 1];
}
__global__ void prefixScanV3(int* num, int n)
{
int i = threadIdx.x;
lds[i * 2] = num[i * 2];
lds[i * 2 + 1] = num[i * 2 + 1];
__syncthreads();
// Bottom-up
//
// array
// 0 1 2 3 4 5 6 7
// thread id
// 3 2 1 0
// array
// 0 0+1 2 2+3 4 4+5 6 6+7
// thread id
// 1 0
// array
// 0 0+1 2 0+1+2+3 4 4+5 6 4+5+6+7
// thread id
// 0
// array
// 0 0+1 2 0+1+2+3 4 4+5 6 0+1+2+3+4+5+6+7
for (int step = 1; step < n; step *= 2)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 0)
{
lds[n - 1] = 0;
}
__syncthreads();
for (int step = n >> 1; step >= 1; step >>= 1)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
num[i * 2] = lds[i * 2];
num[i * 2 + 1] = lds[i * 2 + 1];
}
template<int n>
__forceinline__ __device__ void scan()
{
int i = threadIdx.x;
int step;
#pragma unroll
for (step = 1; step < 32; step *= 2)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
}
#pragma unroll
for (; step < n; step *= 2)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 0)
{
lds[n - 1] = 0;
}
__syncthreads();
#pragma unroll
for (step = n >> 1; step >= 32; step >>= 1)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
#pragma unroll
for (step = 16; step >= 1; step >>= 1)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
}
}
template<int n, int m>
__global__ void prefixScanV4(int* num, int count)
{
int i = threadIdx.x;
int idx1 = blockIdx.x * n + i * 2;
int idx2 = blockIdx.x * n + i * 2 + 1;
lds[i * 2] = idx1 < count ? num[idx1] : 0;
lds[i * 2 + 1] = idx2 < count ? num[idx2] : 0;
__syncthreads();
scan<n>();
__syncthreads();
if (idx1 < count) num[idx1] = lds[i * 2];
if (idx2 < count) num[idx2] = lds[i * 2 + 1];
__syncthreads();
if (i < m / 2)
{
int idx3 = (i * 2) * n + 2047;
int idx4 = (i * 2 + 1) * n + 2047;
lds[i * 2] = idx3 < count ? num[idx3] : 0;
lds[i * 2 + 1] = idx4 < count ? num[idx4] : 0;
}
__syncthreads();
scan<m>();
__syncthreads();
if (idx1 < count) num[idx1] += lds[blockIdx.x];
if (idx2 < count) num[idx2] += lds[blockIdx.x];
}
#endif
template<int n>
__global__ void prefixScanV5_block(volatile int* num, volatile int* num2, int count)
{
extern __shared__ int lds[];
int i = threadIdx.x;
int idx1 = blockIdx.x * n + i * 2;
int idx2 = blockIdx.x * n + i * 2 + 1;
lds[i * 2] = idx1 < count ? num[idx1] : 0;
lds[i * 2 + 1] = idx2 < count ? num[idx2] : 0;
__syncthreads();
int step;
//#pragma unroll
for (step = 1; step < n; step *= 2)
{
__syncthreads();
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
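	// after the up-sweep, the last thread clears the root so the down-sweep produces an exclusive scan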
if (i == blockDim.x - 1)
{
lds[n - 1] = 0;
}
__syncthreads();
// #pragma unroll
for (step = n >> 1; step > 1; step >>= 1)
{
__syncthreads();
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
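	// record this block's total (last input element plus its exclusive prefix) for a later grid-level scan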
if (i == blockDim.x - 1)
{
num2[blockIdx.x] = num[idx2] + lds[i * 2 + 1];
}
if (idx1 < count) num[idx1] = lds[i * 2];
if (idx2 < count) num[idx2] = lds[i * 2 + 1];
}
#if 0
template<int n>
__global__ void prefixScanV5_grid(volatile int* num, int count)
{
extern __shared__ int lds[];
int i = threadIdx.x;
int idx1 = i * 2;
int idx2 = i * 2 + 1;
lds[idx1] = idx1 < count ? num[idx1] : 0;
lds[idx2] = idx2 < count ? num[idx2] : 0;
__syncthreads();
int step;
#pragma unroll
for (step = 1; step < 32; step *= 2)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
}
#pragma unroll
for (step = 32; step < n; step *= 2)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 0)
{
lds[n - 1] = 0;
}
__syncthreads();
#pragma unroll
for (step = n >> 1; step >= 32; step >>= 1)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
#pragma unroll
for (step = 16; step >= 1; step >>= 1)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
}
__syncthreads();
if (idx1 < count) num[idx1] = lds[idx1];
if (idx2 < count) num[idx2] = lds[idx2];
}
template<int n>
__global__ void prefixScanV5_add(volatile int* num, volatile int* num2, int count)
{
int i = threadIdx.x;
int j = blockIdx.x;
int idx1 = j * n + i * 2;
int idx2 = j * n + i * 2 + 1;
int blockScanRes = num2[j];
if (idx1 < count) num[idx1] += blockScanRes;
if (idx2 < count) num[idx2] += blockScanRes;
}
#endif
int getTwoExpPaddedSize(int n)
{
int res = 1;
while (n > res) res <<= 1;
return res;
}
int main()
{
srand(time(NULL));
const int numCount = 16; // max 4,194,304
const int numSize = numCount * sizeof(int);
int* num_host = new int[numCount];
int sum = 0;
std::cout << sum << ",";
for (int i = 0; i < numCount; ++i)
{
num_host[i] = rand() % 8;
sum += num_host[i];
if (i != numCount - 1)
std::cout << sum << ",";
}
sum -= num_host[numCount - 1];
std::cout << "\n\n\n";
std::cout << sum << "\n";
std::cout << "\n\n\n";
int* num_device;
int* gridNum_d;
hipMalloc((void**)& num_device, numSize);
hipMemcpy(num_device, num_host, numSize, hipMemcpyHostToDevice);
int gridDim = (numCount + 2047) / 2048;
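	// ceil(numCount / 2048): in the full-size configuration each block of 1024 threads scans 2048 elements (2 per thread)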
int* gridNum_h = new int[gridDim];
hipMalloc((void**)& gridNum_d, gridDim * sizeof(int));
hipMemset(gridNum_d, 0, gridDim * sizeof(int));
int paddedGridDim = getTwoExpPaddedSize(gridDim);
#if 0
prefixScan << < dim3(1, 1, 1), dim3(padCount / 2, 1, 1) >> > (num_device, padCount);
prefixScanLds << < dim3(1, 1, 1), dim3(padCount / 2, 1, 1), padSize >> > (num_device, padCount);
prefixScanV3 << < dim3(1, 1, 1), dim3(padCount / 2, 1, 1), padSize >> > (num_device, padCount);
switch (paddedGridDim)
{
case 1: prefixScanV4 <2048, 1> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 2: prefixScanV4 <2048, 2> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 4: prefixScanV4 <2048, 4> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 8: prefixScanV4 <2048, 8> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 16: prefixScanV4 <2048, 16> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 32: prefixScanV4 <2048, 32> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 64: prefixScanV4 <2048, 64> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 128: prefixScanV4 <2048, 128> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 256: prefixScanV4 <2048, 256> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 512: prefixScanV4 <2048, 512> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 1024: prefixScanV4 <2048, 1024> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 2048: prefixScanV4 <2048, 2048> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
}
#endif
prefixScanV5_block <16> << < dim3(gridDim, 1, 1), dim3(8, 1, 1), 16 * sizeof(int) >> > (num_device, gridNum_d, numCount);
hipDeviceSynchronize();
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
// switch (paddedGridDim)
// {
// case 1: prefixScanV5_grid <1> << < dim3(1, 1, 1), dim3(1, 1, 1), 1 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 2: prefixScanV5_grid <2> << < dim3(1, 1, 1), dim3(1, 1, 1), 2 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 4: prefixScanV5_grid <4> << < dim3(1, 1, 1), dim3(2, 1, 1), 4 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 8: prefixScanV5_grid <8> << < dim3(1, 1, 1), dim3(4, 1, 1), 8 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 16: prefixScanV5_grid <16> << < dim3(1, 1, 1), dim3(8, 1, 1), 16 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 32: prefixScanV5_grid <32> << < dim3(1, 1, 1), dim3(16, 1, 1), 32 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 64: prefixScanV5_grid <64> << < dim3(1, 1, 1), dim3(32, 1, 1), 64 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 128: prefixScanV5_grid <128> << < dim3(1, 1, 1), dim3(64, 1, 1), 128 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 256: prefixScanV5_grid <256> << < dim3(1, 1, 1), dim3(128, 1, 1), 256 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 512: prefixScanV5_grid <512> << < dim3(1, 1, 1), dim3(256, 1, 1), 512 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 1024: prefixScanV5_grid <1024> << < dim3(1, 1, 1), dim3(512, 1, 1), 1024 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 2048: prefixScanV5_grid <2048> << < dim3(1, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (gridNum_d, gridDim); break;
// }
// GpuErrorCheck(hipDeviceSynchronize());
// GpuErrorCheck(hipPeekAtLastError());
// prefixScanV5_add<2048> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1) >> > (num_device, gridNum_d, numCount);
// GpuErrorCheck(hipDeviceSynchronize());
// GpuErrorCheck(hipPeekAtLastError());
hipMemcpy(num_host, num_device, numSize, hipMemcpyDeviceToHost);
hipMemcpy(gridNum_h, gridNum_d, gridDim * sizeof(int), hipMemcpyDeviceToHost);
std::cout << "\n\n\n";
for (int i = 0; i < gridDim; ++i)
{
std::cout << gridNum_h[i] << ",";
}
std::cout << "\n\n\n";
for (int i = 0; i < numCount; ++i)
{
std::cout << num_host[i] << ",";
}
std::cout << "\n\n\n";
std::cout << num_host[numCount - 1] << "\n";
	delete[] num_host;
	delete[] gridNum_h;
hipFree(gridNum_d);
hipFree(num_device);
return 0;
}
|
666f803d2c971a85bbc515667e9f3fb602b3727d.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
#define GpuErrorCheck(ans) { GpuAssert((ans), __FILE__, __LINE__); }
inline void GpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#if 0
__global__ void prefixScan(int* num, int n)
{
int i = threadIdx.x;
// Bottom-up
//
// thread id
// 1 0
// array
// 0 1 2 3
// 0 0+1 2 2+3
// 0 0+1 2 0+1+2+3
for (int step = 1; step < n; step *= 2)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
num[rightIdx] += num[leftIdx];
}
}
// Top-down
//
// thread id
// 1 0
// array
// 0 0+1 2 "0"
// 0 "0" 2 0+1
// "0" 0 0+1 0+1+2
if (i == 0)
{
num[n - 1] = 0;
}
for (int step = n >> 1; step >= 1; step >>= 1)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
int left = num[leftIdx];
int right = num[rightIdx];
num[leftIdx] = right;
num[rightIdx] = left + right;
}
}
}
extern __shared__ int lds[];
__global__ void prefixScanLds(int* num, int n)
{
int i = threadIdx.x;
lds[i * 2] = num[i * 2];
lds[i * 2 + 1] = num[i * 2 + 1];
for (int step = 1; step < n; step *= 2)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
}
if (i == 0)
{
lds[n - 1] = 0;
}
for (int step = n >> 1; step >= 1; step >>= 1)
{
__syncthreads();
if (i % step == 0)
{
int rightIdx = n - 1 - 2 * i;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
}
__syncthreads();
num[i * 2] = lds[i * 2];
num[i * 2 + 1] = lds[i * 2 + 1];
}
__global__ void prefixScanV3(int* num, int n)
{
int i = threadIdx.x;
lds[i * 2] = num[i * 2];
lds[i * 2 + 1] = num[i * 2 + 1];
__syncthreads();
// Bottom-up
//
// array
// 0 1 2 3 4 5 6 7
// thread id
// 3 2 1 0
// array
// 0 0+1 2 2+3 4 4+5 6 6+7
// thread id
// 1 0
// array
// 0 0+1 2 0+1+2+3 4 4+5 6 4+5+6+7
// thread id
// 0
// array
// 0 0+1 2 0+1+2+3 4 4+5 6 0+1+2+3+4+5+6+7
for (int step = 1; step < n; step *= 2)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 0)
{
lds[n - 1] = 0;
}
__syncthreads();
for (int step = n >> 1; step >= 1; step >>= 1)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
num[i * 2] = lds[i * 2];
num[i * 2 + 1] = lds[i * 2 + 1];
}
template<int n>
__forceinline__ __device__ void scan()
{
int i = threadIdx.x;
int step;
#pragma unroll
for (step = 1; step < 32; step *= 2)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
}
#pragma unroll
for (; step < n; step *= 2)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 0)
{
lds[n - 1] = 0;
}
__syncthreads();
#pragma unroll
for (step = n >> 1; step >= 32; step >>= 1)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
#pragma unroll
for (step = 16; step >= 1; step >>= 1)
{
if (i < n / 2 / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
}
}
template<int n, int m>
__global__ void prefixScanV4(int* num, int count)
{
int i = threadIdx.x;
int idx1 = blockIdx.x * n + i * 2;
int idx2 = blockIdx.x * n + i * 2 + 1;
lds[i * 2] = idx1 < count ? num[idx1] : 0;
lds[i * 2 + 1] = idx2 < count ? num[idx2] : 0;
__syncthreads();
scan<n>();
__syncthreads();
if (idx1 < count) num[idx1] = lds[i * 2];
if (idx2 < count) num[idx2] = lds[i * 2 + 1];
__syncthreads();
if (i < m / 2)
{
int idx3 = (i * 2) * n + 2047;
int idx4 = (i * 2 + 1) * n + 2047;
lds[i * 2] = idx3 < count ? num[idx3] : 0;
lds[i * 2 + 1] = idx4 < count ? num[idx4] : 0;
}
__syncthreads();
scan<m>();
__syncthreads();
if (idx1 < count) num[idx1] += lds[blockIdx.x];
if (idx2 < count) num[idx2] += lds[blockIdx.x];
}
#endif
template<int n>
__global__ void prefixScanV5_block(volatile int* num, volatile int* num2, int count)
{
extern __shared__ int lds[];
int i = threadIdx.x;
int idx1 = blockIdx.x * n + i * 2;
int idx2 = blockIdx.x * n + i * 2 + 1;
lds[i * 2] = idx1 < count ? num[idx1] : 0;
lds[i * 2 + 1] = idx2 < count ? num[idx2] : 0;
__syncthreads();
int step;
//#pragma unroll
for (step = 1; step < n; step *= 2)
{
__syncthreads();
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
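	// after the up-sweep, the last thread clears the root so the down-sweep produces an exclusive scan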
if (i == blockDim.x - 1)
{
lds[n - 1] = 0;
}
__syncthreads();
// #pragma unroll
for (step = n >> 1; step > 1; step >>= 1)
{
__syncthreads();
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
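	// record this block's total (last input element plus its exclusive prefix) for a later grid-level scan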
if (i == blockDim.x - 1)
{
num2[blockIdx.x] = num[idx2] + lds[i * 2 + 1];
}
if (idx1 < count) num[idx1] = lds[i * 2];
if (idx2 < count) num[idx2] = lds[i * 2 + 1];
}
#if 0
template<int n>
__global__ void prefixScanV5_grid(volatile int* num, int count)
{
extern __shared__ int lds[];
int i = threadIdx.x;
int idx1 = i * 2;
int idx2 = i * 2 + 1;
lds[idx1] = idx1 < count ? num[idx1] : 0;
lds[idx2] = idx2 < count ? num[idx2] : 0;
__syncthreads();
int step;
#pragma unroll
for (step = 1; step < 32; step *= 2)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
}
#pragma unroll
for (step = 32; step < n; step *= 2)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 0)
{
lds[n - 1] = 0;
}
__syncthreads();
#pragma unroll
for (step = n >> 1; step >= 32; step >>= 1)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
#pragma unroll
for (step = 16; step >= 1; step >>= 1)
{
if (i < (n / 2) / step)
{
int rightIdx = n - 1 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
}
__syncthreads();
if (idx1 < count) num[idx1] = lds[idx1];
if (idx2 < count) num[idx2] = lds[idx2];
}
template<int n>
__global__ void prefixScanV5_add(volatile int* num, volatile int* num2, int count)
{
int i = threadIdx.x;
int j = blockIdx.x;
int idx1 = j * n + i * 2;
int idx2 = j * n + i * 2 + 1;
int blockScanRes = num2[j];
if (idx1 < count) num[idx1] += blockScanRes;
if (idx2 < count) num[idx2] += blockScanRes;
}
#endif
int getTwoExpPaddedSize(int n)
{
int res = 1;
while (n > res) res <<= 1;
return res;
}
int main()
{
srand(time(NULL));
const int numCount = 16; // max 4,194,304
const int numSize = numCount * sizeof(int);
int* num_host = new int[numCount];
int sum = 0;
std::cout << sum << ",";
for (int i = 0; i < numCount; ++i)
{
num_host[i] = rand() % 8;
sum += num_host[i];
if (i != numCount - 1)
std::cout << sum << ",";
}
sum -= num_host[numCount - 1];
std::cout << "\n\n\n";
std::cout << sum << "\n";
std::cout << "\n\n\n";
int* num_device;
int* gridNum_d;
cudaMalloc((void**)& num_device, numSize);
cudaMemcpy(num_device, num_host, numSize, cudaMemcpyHostToDevice);
int gridDim = (numCount + 2047) / 2048;
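	// ceil(numCount / 2048): in the full-size configuration each block of 1024 threads scans 2048 elements (2 per thread)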
int* gridNum_h = new int[gridDim];
cudaMalloc((void**)& gridNum_d, gridDim * sizeof(int));
cudaMemset(gridNum_d, 0, gridDim * sizeof(int));
int paddedGridDim = getTwoExpPaddedSize(gridDim);
#if 0
prefixScan << < dim3(1, 1, 1), dim3(padCount / 2, 1, 1) >> > (num_device, padCount);
prefixScanLds << < dim3(1, 1, 1), dim3(padCount / 2, 1, 1), padSize >> > (num_device, padCount);
prefixScanV3 << < dim3(1, 1, 1), dim3(padCount / 2, 1, 1), padSize >> > (num_device, padCount);
switch (paddedGridDim)
{
case 1: prefixScanV4 <2048, 1> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 2: prefixScanV4 <2048, 2> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 4: prefixScanV4 <2048, 4> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 8: prefixScanV4 <2048, 8> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 16: prefixScanV4 <2048, 16> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 32: prefixScanV4 <2048, 32> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 64: prefixScanV4 <2048, 64> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 128: prefixScanV4 <2048, 128> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 256: prefixScanV4 <2048, 256> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 512: prefixScanV4 <2048, 512> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 1024: prefixScanV4 <2048, 1024> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
case 2048: prefixScanV4 <2048, 2048> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (num_device, numCount); break;
}
#endif
prefixScanV5_block <16> << < dim3(gridDim, 1, 1), dim3(8, 1, 1), 16 * sizeof(int) >> > (num_device, gridNum_d, numCount);
cudaDeviceSynchronize();
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
// switch (paddedGridDim)
// {
// case 1: prefixScanV5_grid <1> << < dim3(1, 1, 1), dim3(1, 1, 1), 1 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 2: prefixScanV5_grid <2> << < dim3(1, 1, 1), dim3(1, 1, 1), 2 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 4: prefixScanV5_grid <4> << < dim3(1, 1, 1), dim3(2, 1, 1), 4 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 8: prefixScanV5_grid <8> << < dim3(1, 1, 1), dim3(4, 1, 1), 8 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 16: prefixScanV5_grid <16> << < dim3(1, 1, 1), dim3(8, 1, 1), 16 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 32: prefixScanV5_grid <32> << < dim3(1, 1, 1), dim3(16, 1, 1), 32 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 64: prefixScanV5_grid <64> << < dim3(1, 1, 1), dim3(32, 1, 1), 64 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 128: prefixScanV5_grid <128> << < dim3(1, 1, 1), dim3(64, 1, 1), 128 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 256: prefixScanV5_grid <256> << < dim3(1, 1, 1), dim3(128, 1, 1), 256 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 512: prefixScanV5_grid <512> << < dim3(1, 1, 1), dim3(256, 1, 1), 512 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 1024: prefixScanV5_grid <1024> << < dim3(1, 1, 1), dim3(512, 1, 1), 1024 * sizeof(int) >> > (gridNum_d, gridDim); break;
// case 2048: prefixScanV5_grid <2048> << < dim3(1, 1, 1), dim3(1024, 1, 1), 2048 * sizeof(int) >> > (gridNum_d, gridDim); break;
// }
// GpuErrorCheck(cudaDeviceSynchronize());
// GpuErrorCheck(cudaPeekAtLastError());
// prefixScanV5_add<2048> << < dim3(gridDim, 1, 1), dim3(1024, 1, 1) >> > (num_device, gridNum_d, numCount);
// GpuErrorCheck(cudaDeviceSynchronize());
// GpuErrorCheck(cudaPeekAtLastError());
cudaMemcpy(num_host, num_device, numSize, cudaMemcpyDeviceToHost);
cudaMemcpy(gridNum_h, gridNum_d, gridDim * sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "\n\n\n";
for (int i = 0; i < gridDim; ++i)
{
std::cout << gridNum_h[i] << ",";
}
std::cout << "\n\n\n";
for (int i = 0; i < numCount; ++i)
{
std::cout << num_host[i] << ",";
}
std::cout << "\n\n\n";
std::cout << num_host[numCount - 1] << "\n";
	delete[] num_host;
	delete[] gridNum_h;
cudaFree(gridNum_d);
cudaFree(num_device);
return 0;
}
|
42111466d1eb1c0b4bec3bce0f2f472f757055ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*file name: matrix.cu
 *this program contains code to do relatively simple matrix operations
*This was designed to work with matrices hundreds of indexes long
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "cuPrintf.hip"
#include "matrix.h"
/*
function: matrixMultiplication
this function takes in an m X n matrix h_a,
an n X k matrix h_b, and computes the matrix multiplication and
stores the result in an m x k matrix (C)
*/
__global__ void matrixMultiplication(double *h_a, double *h_b, double *h_result, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
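	// each thread is responsible for one element (row, col) of the m x k result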
	double sum = 0;  // accumulate in double to avoid truncating the products
if(col < k && row < m)
{
for(int i = 0; i < n; ++i)
{
sum+=h_a[row*n+i] * h_b[i*k+col];
}
h_result[row*k + col] = sum;
}
}
/*
function: transposeMatrix
  this function takes a rows x cols matrix inputMatrix
  and transposes it into outputMatrix, which has dimension cols x rows
*/
__global__ void transposeMatrix(double *inputMatrix, double* outputMatrix, const unsigned int rows, const unsigned int cols)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < cols && idy < rows)
{
unsigned int pos = idy * cols + idx;
unsigned int trans_pos = idx * rows + idy;
outputMatrix[trans_pos] = inputMatrix[pos];
}
}
/*
function: printMatrixCuda
this function prints out the matrix using CUDA
*/
__global__ void printMatrixCuda(double *C, int M, int N)
{
cuPrintf("nothing is happening \n");
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
cuPrintf("%f ", *((C+i*N) + j));
}
cuPrintf("\n");
}
}
/*
function: main
*/
int main(void)
{
printf("Hello world Matrix cuda function\n");
double inputA[3][3];
double inputB[3][3];
double output[3][3];
int m = 3;
int n = 3;
int k = 3;
for(int i = 0; i < 3; ++i)
{
for(int j = 0; j < 3; ++j)
{
inputA[i][j] = j+1;
inputB[i][j] = j+1;
}
}
int BLOCK_SIZE = 16;
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/*setting up how many blocks and threads we will need based on block size */
/*mallocing temps for cuda gpu */
double *cuda_inputA = NULL;
double *cuda_inputB = NULL;
double *cuda_output = NULL;
int sizeA = sizeof(inputA);
int sizeB = sizeof(inputB);
int sizeO = sizeof(output);
hipMalloc((void**) &cuda_inputA, sizeA);
hipMalloc((void**) &cuda_inputB, sizeB);
hipMalloc((void**) &cuda_output, sizeO);
printf("Finished Allocating!!\n");
/* For hipMemcpy to work, you must pass items by reference,
*/
hipMemcpy(cuda_inputA, inputA, sizeA, hipMemcpyHostToDevice);
hipMemcpy(cuda_inputB, inputB, sizeB, hipMemcpyHostToDevice);
//hipMemcpy(cuda_output, output, sizeO, hipMemcpyHostToDevice);
printf("Finished Copying Cuda Values!!\n");
printf("starting matrix Multiplication \n");
	/* the parameters <<<N, K>>> indicate that you are launching on the gpu.
	N = number of blocks
	K = number of threads per block */
hipLaunchKernelGGL(( matrixMultiplication), dim3(dimGrid),dim3(dimBlock), 0, 0, cuda_inputA, cuda_inputB, cuda_output, m, n, k);
printf("finished matrix multiplcaiton! \n");
printf("bringing result from gpu to device....\n");
hipMemcpy(output, cuda_output, sizeO, hipMemcpyDeviceToHost);
printf("done!\n");
printf("Matrix A: \n");
/*initialize cuPrintf */
cudaPrintfInit();
hipLaunchKernelGGL(( printMatrixCuda), dim3(1),dim3(1), 0, 0, cuda_inputA, m, n);
cudaPrintfDisplay(NULL, true);
cudaPrintfEnd();
// printMatrix((double*)inputA, m, n);
printf("Matrix B\n");
cudaPrintfInit();
hipLaunchKernelGGL(( printMatrixCuda), dim3(1),dim3(1), 0, 0, cuda_inputB, n, k);
cudaPrintfDisplay(NULL, true);
cudaPrintfEnd();
// printMatrix((double*)inputB, n, k);
printf("Matrix Out: \n");
cudaPrintfInit();
hipLaunchKernelGGL(( printMatrixCuda), dim3(1),dim3(1), 0, 0, cuda_output, m, k);
cudaPrintfDisplay(NULL, true);
cudaPrintfEnd();
// printMatrix((double*)output, m, k);
//cudaPrintfEnd();
return 0;
}
void printMatrix(double *C, int M, int N)
{
assert(C);
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
printf("%f ", *((C+i*N) + j));
}
printf("\n");
}
}
|
42111466d1eb1c0b4bec3bce0f2f472f757055ce.cu
|
/*
*file name: matrix.cu
 *this program contains code to do relatively simple matrix operations
*This was designed to work with matrices hundreds of indexes long
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "cuPrintf.cu"
#include "matrix.h"
/*
function: matrixMultiplication
this function takes in an m X n matrix h_a,
an n X k matrix h_b, and computes the matrix multiplication and
stores the result in an m x k matrix (C)
*/
__global__ void matrixMultiplication(double *h_a, double *h_b, double *h_result, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
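	// each thread is responsible for one element (row, col) of the m x k result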
	double sum = 0;  // accumulate in double to avoid truncating the products
if(col < k && row < m)
{
for(int i = 0; i < n; ++i)
{
sum+=h_a[row*n+i] * h_b[i*k+col];
}
h_result[row*k + col] = sum;
}
}
/*
function: transposeMatrix
  this function takes a rows x cols matrix inputMatrix
  and transposes it into outputMatrix, which has dimension cols x rows
*/
__global__ void transposeMatrix(double *inputMatrix, double* outputMatrix, const unsigned int rows, const unsigned int cols)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < cols && idy < rows)
{
unsigned int pos = idy * cols + idx;
unsigned int trans_pos = idx * rows + idy;
outputMatrix[trans_pos] = inputMatrix[pos];
}
}
/*
function: printMatrixCuda
this function prints out the matrix using CUDA
*/
__global__ void printMatrixCuda(double *C, int M, int N)
{
cuPrintf("nothing is happening \n");
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
cuPrintf("%f ", *((C+i*N) + j));
}
cuPrintf("\n");
}
}
/*
function: main
*/
int main(void)
{
printf("Hello world Matrix cuda function\n");
double inputA[3][3];
double inputB[3][3];
double output[3][3];
int m = 3;
int n = 3;
int k = 3;
for(int i = 0; i < 3; ++i)
{
for(int j = 0; j < 3; ++j)
{
inputA[i][j] = j+1;
inputB[i][j] = j+1;
}
}
int BLOCK_SIZE = 16;
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/*setting up how many blocks and threads we will need based on block size */
/*mallocing temps for cuda gpu */
double *cuda_inputA = NULL;
double *cuda_inputB = NULL;
double *cuda_output = NULL;
int sizeA = sizeof(inputA);
int sizeB = sizeof(inputB);
int sizeO = sizeof(output);
cudaMalloc((void**) &cuda_inputA, sizeA);
cudaMalloc((void**) &cuda_inputB, sizeB);
cudaMalloc((void**) &cuda_output, sizeO);
printf("Finished Allocating!!\n");
/* For cudaMemcpy to work, you must pass items by reference,
*/
cudaMemcpy(cuda_inputA, inputA, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_inputB, inputB, sizeB, cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_output, output, sizeO, cudaMemcpyHostToDevice);
printf("Finished Copying Cuda Values!!\n");
printf("starting matrix Multiplication \n");
	/* the parameters <<<N, K>>> indicate that you are launching on the gpu.
	N = number of blocks
	K = number of threads per block */
matrixMultiplication<<<dimGrid,dimBlock>>>(cuda_inputA, cuda_inputB, cuda_output, m, n, k);
printf("finished matrix multiplcaiton! \n");
printf("bringing result from gpu to device....\n");
cudaMemcpy(output, cuda_output, sizeO, cudaMemcpyDeviceToHost);
printf("done!\n");
printf("Matrix A: \n");
/*initialize cuPrintf */
cudaPrintfInit();
printMatrixCuda<<<1,1>>>(cuda_inputA, m, n);
cudaPrintfDisplay(NULL, true);
cudaPrintfEnd();
// printMatrix((double*)inputA, m, n);
printf("Matrix B\n");
cudaPrintfInit();
printMatrixCuda<<<1,1>>>(cuda_inputB, n, k);
cudaPrintfDisplay(NULL, true);
cudaPrintfEnd();
// printMatrix((double*)inputB, n, k);
printf("Matrix Out: \n");
cudaPrintfInit();
printMatrixCuda<<<1,1>>>(cuda_output, m, k);
cudaPrintfDisplay(NULL, true);
cudaPrintfEnd();
// printMatrix((double*)output, m, k);
//cudaPrintfEnd();
return 0;
}
void printMatrix(double *C, int M, int N)
{
assert(C);
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
printf("%f ", *((C+i*N) + j));
}
printf("\n");
}
}
|
23bf0cf7a8a84bbc9c84e30e2221dd2dd956c6e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "testDrive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
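// iXSIZE and iYSIZE are now rounded up to multiples of the block size, so the grid covers the whole matrix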
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(testDrive, dim3(gridBlock), dim3(threadBlock), 0, 0);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(testDrive, dim3(gridBlock), dim3(threadBlock), 0, 0);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(testDrive, dim3(gridBlock), dim3(threadBlock), 0, 0);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
23bf0cf7a8a84bbc9c84e30e2221dd2dd956c6e8.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "testDrive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
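// iXSIZE and iYSIZE are now rounded up to multiples of the block size, so the grid covers the whole matrix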
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
testDrive<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
testDrive<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
testDrive<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9ff6f31c12be234df6dfaa9d3c25a62447d2a779.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode_rotate.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <hipcub/hipcub.hpp>
#include <cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode_rotate(int batch_size,
const void *const *inputs, void *const *outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, hipStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
hipcub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
hipMemcpyAsync(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, hipMemcpyHostToDevice, stream);
auto on_stream = thrust::hip::par.on(stream);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 6; //From 4
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float6 *>(outputs[1]) + batch * top_n; // From float4
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(on_stream, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
hipcub::DeviceSelect::Flagged(workspace, workspace_size, hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size, stream);
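    // Block on the stream so num_selected, written by DeviceSelect::Flagged above,
    // can be read on the host just below.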
hipStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
auto indices_filtered = indices;
if (num_detections > top_n) {
thrust::gather(on_stream, indices, indices + num_detections,
in_scores, scores);
hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream);
indices_filtered = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(on_stream, indices_filtered, indices_filtered + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
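      // Flat score index is laid out as [anchor][class][height][width]; recover each coordinate.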
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float6 box = make_float6(
make_float4(
in_boxes[((a * 6 + 0) * height + y) * width + x],
in_boxes[((a * 6 + 1) * height + y) * width + x],
in_boxes[((a * 6 + 2) * height + y) * width + x],
in_boxes[((a * 6 + 3) * height + y) * width + x]
),
make_float2(
in_boxes[((a * 6 + 4) * height + y) * width + x],
in_boxes[((a * 6 + 5) * height + y) * width + x]
)
);
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x1 * w + x1 + 0.5f * w;
float pred_ctr_y = box.y1 * h + y1 + 0.5f * h;
float pred_w = exp(box.x2) * w;
float pred_h = exp(box.y2) * h;
float pred_sin = box.s;
float pred_cos = box.c;
box = make_float6(
make_float4(
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
),
make_float2(pred_sin, pred_cos)
);
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(on_stream, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(on_stream, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
}
|
9ff6f31c12be234df6dfaa9d3c25a62447d2a779.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode_rotate.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <cub/device/device_radix_sort.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode_rotate(int batch_size,
const void *const *inputs, void *const *outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, cudaStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
cub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
cudaMemcpyAsync(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, cudaMemcpyHostToDevice, stream);
auto on_stream = thrust::cuda::par.on(stream);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 6; //From 4
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float6 *>(outputs[1]) + batch * top_n; // From float4
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(on_stream, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
cub::DeviceSelect::Flagged(workspace, workspace_size, cub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size, stream);
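    // Block on the stream so num_selected, written by DeviceSelect::Flagged above,
    // can be read on the host just below.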
cudaStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
auto indices_filtered = indices;
if (num_detections > top_n) {
thrust::gather(on_stream, indices, indices + num_detections,
in_scores, scores);
cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream);
indices_filtered = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(on_stream, indices_filtered, indices_filtered + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
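      // Flat score index is laid out as [anchor][class][height][width]; recover each coordinate.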
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float6 box = make_float6(
make_float4(
in_boxes[((a * 6 + 0) * height + y) * width + x],
in_boxes[((a * 6 + 1) * height + y) * width + x],
in_boxes[((a * 6 + 2) * height + y) * width + x],
in_boxes[((a * 6 + 3) * height + y) * width + x]
),
make_float2(
in_boxes[((a * 6 + 4) * height + y) * width + x],
in_boxes[((a * 6 + 5) * height + y) * width + x]
)
);
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x1 * w + x1 + 0.5f * w;
float pred_ctr_y = box.y1 * h + y1 + 0.5f * h;
float pred_w = exp(box.x2) * w;
float pred_h = exp(box.y2) * h;
float pred_sin = box.s;
float pred_cos = box.c;
box = make_float6(
make_float4(
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
),
make_float2(pred_sin, pred_cos)
);
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(on_stream, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(on_stream, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
}
|
f9fa9c3bac9cc4b02985b33c4e8fb428a0b7673f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/sparse_cross_entropy_kernel_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/softmax.cuh"
#include "oneflow/core/kernel/cuda_graph_support.h"
namespace oneflow {
namespace user_op {
namespace {
template<typename T>
void ComputeProb(DeviceCtx* ctx, const int64_t row, const int64_t col, const T* in, T* prob) {
using ComputeType = typename cuda::softmax::DefaultComputeType<T>::type;
cuda::softmax::DirectLoad<T, ComputeType> load(in, col);
cuda::softmax::DirectStore<ComputeType, T> store(prob, col);
cuda::softmax::DispatchLogSoftmax<decltype(load), decltype(store), ComputeType>(
ctx->cuda_stream(), load, store, row, col);
}
template<>
void ComputeProb(DeviceCtx* ctx, const int64_t row, const int64_t col, const float16* in,
float16* prob) {
cuda::softmax::DirectLoad<half, float> load(reinterpret_cast<const half*>(in), col);
cuda::softmax::DirectStore<float, half> store(reinterpret_cast<half*>(prob), col);
cuda::softmax::DispatchLogSoftmax<decltype(load), decltype(store), float>(ctx->cuda_stream(),
load, store, row, col);
}
template<typename T, typename K>
__global__ void ComputeSparseSoftmaxCrossEntropyResultGpu(const int64_t num_instances,
const int64_t num_classes,
const int64_t depth,
const int64_t lower_bound,
const K* labels, const T* prob, T* out) {
CUDA_1D_KERNEL_LOOP(i, num_instances) {
assert(labels[i] >= 0);
assert(labels[i] < depth);
K label = labels[i] - lower_bound;
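    // prob holds log-softmax outputs, so the loss is the negative log-probability
    // of the (lower_bound-shifted) target class.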
if (label >= 0 && label < num_classes) { out[i] = -prob[i * num_classes + label]; }
}
}
template<typename T, typename K>
inline typename std::enable_if<std::is_floating_point<T>::value, void>::type
ComputeSparseSoftmaxCrossEntropyResult(DeviceCtx* ctx, const int64_t num_instances,
const int64_t num_classes, const int64_t depth,
const int64_t lower_bound, const K* labels, const T* prob,
T* out) {
hipLaunchKernelGGL(( ComputeSparseSoftmaxCrossEntropyResultGpu<T, K>)
, dim3(BlocksNum4ThreadsNum(num_instances)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
num_instances, num_classes, depth, lower_bound, labels, prob, out);
}
template<typename T, typename K>
inline typename std::enable_if<std::is_same<T, float16>::value, void>::type
ComputeSparseSoftmaxCrossEntropyResult(DeviceCtx* ctx, const int64_t num_instances,
const int64_t num_classes, const int64_t depth,
const int64_t lower_bound, const K* labels, const T* prob,
T* out) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
hipLaunchKernelGGL(( ComputeSparseSoftmaxCrossEntropyResultGpu<half, K>)
, dim3(BlocksNum4ThreadsNum(num_instances)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
num_instances, num_classes, depth, lower_bound, labels,
reinterpret_cast<const half*>(prob), reinterpret_cast<half*>(out));
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
} // namespace
template<typename T, typename K>
class SparseSoftmaxCrossEntropyKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
SparseSoftmaxCrossEntropyKernel() = default;
~SparseSoftmaxCrossEntropyKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* prediction = ctx->Tensor4ArgNameAndIndex("prediction", 0);
const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
user_op::Tensor* prob = ctx->Tensor4ArgNameAndIndex("prob", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t num_instances = label->shape().elem_cnt();
CHECK_EQ(prediction->shape().elem_cnt() % num_instances, 0);
const int64_t num_classes = prediction->shape().elem_cnt() / num_instances;
const int64_t lower_bound = 0;
const int64_t depth = ctx->Attr<int64_t>("depth");
ComputeProb<T>(ctx->device_ctx(), num_instances, num_classes, prediction->dptr<T>(),
prob->mut_dptr<T>());
ComputeSparseSoftmaxCrossEntropyResult<T, K>(ctx->device_ctx(), num_instances, num_classes,
depth, lower_bound, label->dptr<K>(),
prob->dptr<T>(), out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_SPARSE_SOFTMAX_CROSS_ENTROPY_KERNEL(dtype_pair, ltype_pair) \
REGISTER_USER_KERNEL("sparse_softmax_cross_entropy") \
.SetCreateFn<SparseSoftmaxCrossEntropyKernel<OF_PP_PAIR_FIRST(dtype_pair), \
OF_PP_PAIR_FIRST(ltype_pair)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \
& (user_op::HobDataType("out", 0) == OF_PP_PAIR_SECOND(dtype_pair)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SPARSE_SOFTMAX_CROSS_ENTROPY_KERNEL,
FLOATING_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace user_op
} // namespace oneflow
|
f9fa9c3bac9cc4b02985b33c4e8fb428a0b7673f.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/sparse_cross_entropy_kernel_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/softmax.cuh"
#include "oneflow/core/kernel/cuda_graph_support.h"
namespace oneflow {
namespace user_op {
namespace {
template<typename T>
void ComputeProb(DeviceCtx* ctx, const int64_t row, const int64_t col, const T* in, T* prob) {
using ComputeType = typename cuda::softmax::DefaultComputeType<T>::type;
cuda::softmax::DirectLoad<T, ComputeType> load(in, col);
cuda::softmax::DirectStore<ComputeType, T> store(prob, col);
cuda::softmax::DispatchLogSoftmax<decltype(load), decltype(store), ComputeType>(
ctx->cuda_stream(), load, store, row, col);
}
template<>
void ComputeProb(DeviceCtx* ctx, const int64_t row, const int64_t col, const float16* in,
float16* prob) {
cuda::softmax::DirectLoad<half, float> load(reinterpret_cast<const half*>(in), col);
cuda::softmax::DirectStore<float, half> store(reinterpret_cast<half*>(prob), col);
cuda::softmax::DispatchLogSoftmax<decltype(load), decltype(store), float>(ctx->cuda_stream(),
load, store, row, col);
}
template<typename T, typename K>
__global__ void ComputeSparseSoftmaxCrossEntropyResultGpu(const int64_t num_instances,
const int64_t num_classes,
const int64_t depth,
const int64_t lower_bound,
const K* labels, const T* prob, T* out) {
CUDA_1D_KERNEL_LOOP(i, num_instances) {
assert(labels[i] >= 0);
assert(labels[i] < depth);
K label = labels[i] - lower_bound;
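    // prob holds log-softmax outputs, so the loss is the negative log-probability
    // of the (lower_bound-shifted) target class.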
if (label >= 0 && label < num_classes) { out[i] = -prob[i * num_classes + label]; }
}
}
template<typename T, typename K>
inline typename std::enable_if<std::is_floating_point<T>::value, void>::type
ComputeSparseSoftmaxCrossEntropyResult(DeviceCtx* ctx, const int64_t num_instances,
const int64_t num_classes, const int64_t depth,
const int64_t lower_bound, const K* labels, const T* prob,
T* out) {
ComputeSparseSoftmaxCrossEntropyResultGpu<T, K>
<<<BlocksNum4ThreadsNum(num_instances), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
num_instances, num_classes, depth, lower_bound, labels, prob, out);
}
template<typename T, typename K>
inline typename std::enable_if<std::is_same<T, float16>::value, void>::type
ComputeSparseSoftmaxCrossEntropyResult(DeviceCtx* ctx, const int64_t num_instances,
const int64_t num_classes, const int64_t depth,
const int64_t lower_bound, const K* labels, const T* prob,
T* out) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
ComputeSparseSoftmaxCrossEntropyResultGpu<half, K>
<<<BlocksNum4ThreadsNum(num_instances), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
num_instances, num_classes, depth, lower_bound, labels,
reinterpret_cast<const half*>(prob), reinterpret_cast<half*>(out));
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
} // namespace
template<typename T, typename K>
class SparseSoftmaxCrossEntropyKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
SparseSoftmaxCrossEntropyKernel() = default;
~SparseSoftmaxCrossEntropyKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* prediction = ctx->Tensor4ArgNameAndIndex("prediction", 0);
const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
user_op::Tensor* prob = ctx->Tensor4ArgNameAndIndex("prob", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t num_instances = label->shape().elem_cnt();
CHECK_EQ(prediction->shape().elem_cnt() % num_instances, 0);
const int64_t num_classes = prediction->shape().elem_cnt() / num_instances;
const int64_t lower_bound = 0;
const int64_t depth = ctx->Attr<int64_t>("depth");
ComputeProb<T>(ctx->device_ctx(), num_instances, num_classes, prediction->dptr<T>(),
prob->mut_dptr<T>());
ComputeSparseSoftmaxCrossEntropyResult<T, K>(ctx->device_ctx(), num_instances, num_classes,
depth, lower_bound, label->dptr<K>(),
prob->dptr<T>(), out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_SPARSE_SOFTMAX_CROSS_ENTROPY_KERNEL(dtype_pair, ltype_pair) \
REGISTER_USER_KERNEL("sparse_softmax_cross_entropy") \
.SetCreateFn<SparseSoftmaxCrossEntropyKernel<OF_PP_PAIR_FIRST(dtype_pair), \
OF_PP_PAIR_FIRST(ltype_pair)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \
& (user_op::HobDataType("out", 0) == OF_PP_PAIR_SECOND(dtype_pair)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SPARSE_SOFTMAX_CROSS_ENTROPY_KERNEL,
FLOATING_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace user_op
} // namespace oneflow
|
6dad75983ede7289635a94f7938dd20cc2026f1f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_mat_dvz_dz(z,x) d_mat_dvz_dz[(x)*(nz)+(z)]
#define d_mat_dvx_dx(z,x) d_mat_dvx_dx[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
#define d_CpGrad(z,x) d_CpGrad[(x)*(nz)+(z)]
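// The macros above flatten a (z, x) pair into a 1-D index with z contiguous (stride nz per x column).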
__global__ void image_vel(float *d_szz, \
int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, \
float *d_Cp, float *d_Den, float *d_mat_dvz_dz, float *d_mat_dvx_dx, float * d_CpGrad){
int gidz = blockIdx.x*blockDim.x + threadIdx.x;
int gidx = blockIdx.y*blockDim.y + threadIdx.y;
if (gidz>=nPml && gidz<=nz-nPad-nPml-1 && gidx>=nPml && gidx<=nx-nPml-1) {
// compute the Vp kernel on the fly
d_CpGrad(gidz, gidx) += -2.0 * d_Cp(gidz, gidx) * d_Den(gidz, gidx) *\
(d_mat_dvz_dz(gidz, gidx) + d_mat_dvx_dx(gidz, gidx)) * d_szz(gidz, gidx) * dt;
}
else {
return;
}
}
|
6dad75983ede7289635a94f7938dd20cc2026f1f.cu
|
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_mat_dvz_dz(z,x) d_mat_dvz_dz[(x)*(nz)+(z)]
#define d_mat_dvx_dx(z,x) d_mat_dvx_dx[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
#define d_CpGrad(z,x) d_CpGrad[(x)*(nz)+(z)]
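// The macros above flatten a (z, x) pair into a 1-D index with z contiguous (stride nz per x column).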
__global__ void image_vel(float *d_szz, \
int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, \
float *d_Cp, float *d_Den, float *d_mat_dvz_dz, float *d_mat_dvx_dx, float * d_CpGrad){
int gidz = blockIdx.x*blockDim.x + threadIdx.x;
int gidx = blockIdx.y*blockDim.y + threadIdx.y;
if (gidz>=nPml && gidz<=nz-nPad-nPml-1 && gidx>=nPml && gidx<=nx-nPml-1) {
// compute the Vp kernel on the fly
d_CpGrad(gidz, gidx) += -2.0 * d_Cp(gidz, gidx) * d_Den(gidz, gidx) *\
(d_mat_dvz_dz(gidz, gidx) + d_mat_dvx_dx(gidz, gidx)) * d_szz(gidz, gidx) * dt;
}
else {
return;
}
}
|
21c645805785e1dbc9a71bd321596e32807f8043.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* File description: cumat_add.cu
* Author information: Mike Ranzinger [email protected]
* Copyright information: Copyright Orchestr8 LLC
*/
#include "cumat.cuh"
#include <rocblas.h>
#include <stdexcept>
#include <assert.h>
using namespace std;
CuMat operator+(const CuMat &a, const CuMat &b)
{
CuMat ret;
a.BinaryExpr<false>(b, ret, CuPlus());
return ret;
}
CuMat operator-(const CuMat &a, const CuMat &b)
{
CuMat ret;
a.BinaryExpr<false>(b, ret, CuMinus());
return ret;
}
CuMat &operator+=(CuMat &a, const CuMat &b)
{
a.BinaryExpr(b, CuPlus());
return a;
}
CuMat &operator-=(CuMat &a, const CuMat &b)
{
a.BinaryExpr(b, CuMinus());
return a;
}
void CuMat::AddScaled(Real scaleThis, const CuMat& b, Real scaleB)
{
AddScaled(scaleThis, b, scaleB, *this);
}
void CuMat::AddScaled(Real scaleThis, const CuMat& b, Real scaleB,
CuMat& dest) const
{
AssertSameDims(b);
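    // Element-wise: dest = scaleThis * (*this) + scaleB * b (presumably what CuAddScaledBinary encodes).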
BinaryExpr<false>(b, dest, CuAddScaledBinary(scaleThis, scaleB));
}
void AddScaled(const CuMat &a, Real scaleA, const CuMat &b, Real scaleB, CuMat &dest)
{
a.AddScaled(scaleA, b, scaleB, dest);
}
|
21c645805785e1dbc9a71bd321596e32807f8043.cu
|
/*
* File description: cumat_add.cu
* Author information: Mike Ranzinger [email protected]
* Copyright information: Copyright Orchestr8 LLC
*/
#include "cumat.cuh"
#include <cublas_v2.h>
#include <stdexcept>
#include <assert.h>
using namespace std;
CuMat operator+(const CuMat &a, const CuMat &b)
{
CuMat ret;
a.BinaryExpr<false>(b, ret, CuPlus());
return ret;
}
CuMat operator-(const CuMat &a, const CuMat &b)
{
CuMat ret;
a.BinaryExpr<false>(b, ret, CuMinus());
return ret;
}
CuMat &operator+=(CuMat &a, const CuMat &b)
{
a.BinaryExpr(b, CuPlus());
return a;
}
CuMat &operator-=(CuMat &a, const CuMat &b)
{
a.BinaryExpr(b, CuMinus());
return a;
}
void CuMat::AddScaled(Real scaleThis, const CuMat& b, Real scaleB)
{
AddScaled(scaleThis, b, scaleB, *this);
}
void CuMat::AddScaled(Real scaleThis, const CuMat& b, Real scaleB,
CuMat& dest) const
{
AssertSameDims(b);
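    // Element-wise: dest = scaleThis * (*this) + scaleB * b (presumably what CuAddScaledBinary encodes).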
BinaryExpr<false>(b, dest, CuAddScaledBinary(scaleThis, scaleB));
}
void AddScaled(const CuMat &a, Real scaleA, const CuMat &b, Real scaleB, CuMat &dest)
{
a.AddScaled(scaleA, b, scaleB, dest);
}
|
ee3b41ee6ee2d3a8e7da7e4d03d4a99b351e9562.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include "vision_cuda.h"
namespace rcnn{
namespace layers{
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
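    // Each ROI is five values: (batch_index, x1, y1, x2, y2) in input-image coordinates.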
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
torch::Tensor ROIAlign_forward_cuda(const torch::Tensor& input,
const torch::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
int current_device;
THCudaCheck(hipGetDevice(¤t_device));
THCudaCheck(hipSetDevice(input.get_device()));
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = torch::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
THCudaCheck(hipSetDevice(current_device));
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
torch::Tensor ROIAlign_backward_cuda(const torch::Tensor& grad,
const torch::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
int current_device;
THCudaCheck(hipGetDevice(¤t_device));
THCudaCheck(hipSetDevice(grad.get_device()));
auto num_rois = rois.size(0);
auto grad_input = torch::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
THCudaCheck(hipSetDevice(current_device));
return grad_input;
}
}
}
|
ee3b41ee6ee2d3a8e7da7e4d03d4a99b351e9562.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include "vision_cuda.h"
namespace rcnn{
namespace layers{
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
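    // Each ROI is five values: (batch_index, x1, y1, x2, y2) in input-image coordinates.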
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
torch::Tensor ROIAlign_forward_cuda(const torch::Tensor& input,
const torch::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
int current_device;
THCudaCheck(cudaGetDevice(¤t_device));
THCudaCheck(cudaSetDevice(input.get_device()));
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = torch::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
THCudaCheck(cudaSetDevice(current_device));
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
torch::Tensor ROIAlign_backward_cuda(const torch::Tensor& grad,
const torch::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
int current_device;
THCudaCheck(cudaGetDevice(¤t_device));
THCudaCheck(cudaSetDevice(grad.get_device()));
auto num_rois = rois.size(0);
auto grad_input = torch::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
THCudaCheck(cudaSetDevice(current_device));
return grad_input;
}
}
}
|
10620a0112f641ab729829d95c0e3d0064237f76.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BrokenLineFitOnGPU.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
template <typename TrackerTraits>
void HelixFitOnGPU<TrackerTraits>::launchBrokenLineKernels(const TrackingRecHitSoAConstView<TrackerTraits>& hv,
uint32_t hitsInFit,
uint32_t maxNumberOfTuples,
hipStream_t stream) {
assert(tuples_);
auto blockSize = 64;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
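  // Enough 64-thread blocks to cover maxNumberOfConcurrentFits_ (presumably one thread per candidate fit).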
// Fit internals
auto tkidGPU =
cms::cuda::make_device_unique<typename TrackerTraits::tindex_type[]>(maxNumberOfConcurrentFits_, stream);
auto hitsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<6>) / sizeof(double), stream);
auto hits_geGPU = cms::cuda::make_device_unique<float[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6xNf<6>) / sizeof(float), stream);
auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream);
for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
// fit triplets
hipLaunchKernelGGL(( kernel_BLFastFit<3, TrackerTraits>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
3,
3,
offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_BLFit<3, TrackerTraits>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
cudaCheck(hipGetLastError());
if (fitNas4_) {
// fit all as 4
riemannFit::rolling_fits<4, TrackerTraits::maxHitsOnTrack, 1>([this,
&hv,
&tkidGPU,
&hitsGPU,
&hits_geGPU,
&fast_fit_resultsGPU,
&offset,
&numberOfBlocks,
&blockSize,
&stream](auto i) {
hipLaunchKernelGGL(( kernel_BLFastFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
4,
4,
offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_BLFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
cudaCheck(hipGetLastError());
});
} else {
riemannFit::rolling_fits<4, TrackerTraits::maxHitsOnTrackForFullFit, 1>([this,
&hv,
&tkidGPU,
&hitsGPU,
&hits_geGPU,
&fast_fit_resultsGPU,
&offset,
&numberOfBlocks,
&blockSize,
&stream](auto i) {
hipLaunchKernelGGL(( kernel_BLFastFit<i, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
i,
i,
offset);
hipLaunchKernelGGL(( kernel_BLFit<i, TrackerTraits>), dim3(8), dim3(blockSize), 0, stream, tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
});
static_assert(TrackerTraits::maxHitsOnTrackForFullFit < TrackerTraits::maxHitsOnTrack);
//Fit all the rest using the maximum from previous call
hipLaunchKernelGGL(( kernel_BLFastFit<TrackerTraits::maxHitsOnTrackForFullFit, TrackerTraits>)
, dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
TrackerTraits::maxHitsOnTrackForFullFit,
TrackerTraits::maxHitsOnTrack - 1,
offset);
hipLaunchKernelGGL(( kernel_BLFit<TrackerTraits::maxHitsOnTrackForFullFit, TrackerTraits>)
, dim3(8), dim3(blockSize), 0, stream, tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
}
} // loop on concurrent fits
}
template class HelixFitOnGPU<pixelTopology::Phase1>;
template class HelixFitOnGPU<pixelTopology::Phase2>;
|
10620a0112f641ab729829d95c0e3d0064237f76.cu
|
#include "BrokenLineFitOnGPU.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
template <typename TrackerTraits>
void HelixFitOnGPU<TrackerTraits>::launchBrokenLineKernels(const TrackingRecHitSoAConstView<TrackerTraits>& hv,
uint32_t hitsInFit,
uint32_t maxNumberOfTuples,
cudaStream_t stream) {
assert(tuples_);
auto blockSize = 64;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
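  // Enough 64-thread blocks to cover maxNumberOfConcurrentFits_ (presumably one thread per candidate fit).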
// Fit internals
auto tkidGPU =
cms::cuda::make_device_unique<typename TrackerTraits::tindex_type[]>(maxNumberOfConcurrentFits_, stream);
auto hitsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<6>) / sizeof(double), stream);
auto hits_geGPU = cms::cuda::make_device_unique<float[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6xNf<6>) / sizeof(float), stream);
auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream);
for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
// fit triplets
kernel_BLFastFit<3, TrackerTraits><<<numberOfBlocks, blockSize, 0, stream>>>(tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
3,
3,
offset);
cudaCheck(cudaGetLastError());
kernel_BLFit<3, TrackerTraits><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
cudaCheck(cudaGetLastError());
if (fitNas4_) {
// fit all as 4
riemannFit::rolling_fits<4, TrackerTraits::maxHitsOnTrack, 1>([this,
&hv,
&tkidGPU,
&hitsGPU,
&hits_geGPU,
&fast_fit_resultsGPU,
&offset,
&numberOfBlocks,
&blockSize,
&stream](auto i) {
kernel_BLFastFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
4,
4,
offset);
cudaCheck(cudaGetLastError());
kernel_BLFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
cudaCheck(cudaGetLastError());
});
} else {
riemannFit::rolling_fits<4, TrackerTraits::maxHitsOnTrackForFullFit, 1>([this,
&hv,
&tkidGPU,
&hitsGPU,
&hits_geGPU,
&fast_fit_resultsGPU,
&offset,
&numberOfBlocks,
&blockSize,
&stream](auto i) {
kernel_BLFastFit<i, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
i,
i,
offset);
kernel_BLFit<i, TrackerTraits><<<8, blockSize, 0, stream>>>(tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
});
static_assert(TrackerTraits::maxHitsOnTrackForFullFit < TrackerTraits::maxHitsOnTrack);
//Fit all the rest using the maximum from previous call
kernel_BLFastFit<TrackerTraits::maxHitsOnTrackForFullFit, TrackerTraits>
<<<numberOfBlocks / 4, blockSize, 0, stream>>>(tuples_,
tupleMultiplicity_,
hv,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
TrackerTraits::maxHitsOnTrackForFullFit,
TrackerTraits::maxHitsOnTrack - 1,
offset);
kernel_BLFit<TrackerTraits::maxHitsOnTrackForFullFit, TrackerTraits>
<<<8, blockSize, 0, stream>>>(tupleMultiplicity_,
bField_,
outputSoa_,
tkidGPU.get(),
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get());
}
} // loop on concurrent fits
}
template class HelixFitOnGPU<pixelTopology::Phase1>;
template class HelixFitOnGPU<pixelTopology::Phase2>;
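// Illustrative sketch (an assumption, not the actual CMSSW implementation of
// riemannFit::rolling_fits): the calls above rely on a compile-time loop that
// hands the lambda a std::integral_constant, which is why `auto i` can be used
// directly as the template argument of kernel_BLFastFit<i, TrackerTraits>.
// A minimal helper with that behaviour could look like this:
#include <type_traits>
template <int Start, int End, int Step, typename F>
void rollingFitsSketch(F const& f) {
  if constexpr (Start < End) {
    f(std::integral_constant<int, Start>{});
    rollingFitsSketch<Start + Step, End, Step>(f);
  }
}
// hypothetical usage:
//   rollingFitsSketch<4, TrackerTraits::maxHitsOnTrackForFullFit, 1>(
//       [&](auto i) { /* launch the <i>-hit fast fit and fit kernels */ });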
|
ff723764b928a3c389946c7e8210368cd7e28afe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
void init_cpu_data(long long int* A, long long int size, long long int stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
/*
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
A[index]=23;
}
*/
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
//__global__ void Page_visitor(long long int *A, long long int *B, long long int data_stride, long long int clock_count){
__global__ void Page_visitor(long long int *A, long long int data_stride, long long int clock_count){ //// load-compute-store
/*
long long int index = threadIdx.x;
/////////////////////////////////time
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
if(index = 0){
start_time= clock64();
}
__syncthreads();
*/
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
long long int value = A[index];
/*
//////////////////////////////////////////////sleep
long long int start_clock = clock64();
long long int clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
*/
//////////////////////////////////////////////loop
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value = value + threadIdx.x;
}
/*
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
if(blockIdx.x == 55){
int nsmid = 1;
asm("mov.u32 %0, %smid;" : "=r"(nsmid) );
printf("nsmid: %d\n", nsmid);
}
}
*/
//d_o[0] = clock_offset;
//////////////////////////////////////////////sleep
A[index] = value;
/*
__syncthreads();
/////////////////////////////////time
if(index = 0){
start_time= clock64();
time_interval = end_time - start_time;//////clock
}
//B[0] = time_interval;
*/
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
//plain managed
printf("###################\n#########################managed\n");
//////////////0.5gb to 64gb stride 0.5gb (1 * 4 * 1024)
for(long long int data_stride = 1 * 1 * 1024; data_stride <= 1 * 1 * 1024; data_stride = data_stride + 1 * 4 * 1024){/////512 is 4m, see what happens after 2m. 128 positions.
//for(long long int data_stride = 1 * 256 * 1024; data_stride <= 1 * 256 * 1024; data_stride = data_stride + 1 * 8 * 1024){
for(long long int block_num = 16; block_num <= 2048; block_num = block_num + 16){////////up to 64gb
//for(long long int block_num = 256; block_num <= 256; block_num = block_num + 16){////////up to 64gb
////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
for(long long int clock_count = 1; clock_count <= 1; clock_count = clock_count * 2){
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * block_num;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
//init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//long long int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(long long int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(long long int) * data_size, hipMemcpyHostToDevice);
/*
///////////////////////////////////////////////////////////////////GPU data out
long long int *GPU_data_out;
//checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(long long int) * data_size));
checkCudaErrors(hipMallocManaged(&GPU_data_out, sizeof(long long int) * data_size));/////////////using unified memory
*/
hipLaunchKernelGGL(( gpu_initialization), dim3(block_num), dim3(512), 0, 0, CPU_data_in, data_stride, data_size);///////////////1024 per block max
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
hipLaunchKernelGGL(( Page_visitor), dim3(block_num), dim3(512), 0, 0, CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 512 * 1 * 256 * 1024 = 32gb, 32 * 512 * 1 * 512 * 1024 = 64gb.
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
//checkCudaErrors(hipFree(GPU_data_out));
}
printf("\n");
}
}
exit(EXIT_SUCCESS);
}
|
ff723764b928a3c389946c7e8210368cd7e28afe.cu
|
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
void init_cpu_data(long long int* A, long long int size, long long int stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
/*
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
A[index]=23;
}
*/
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
//__global__ void Page_visitor(long long int *A, long long int *B, long long int data_stride, long long int clock_count){
__global__ void Page_visitor(long long int *A, long long int data_stride, long long int clock_count){ //// load-compute-store
/*
long long int index = threadIdx.x;
/////////////////////////////////time
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
if(index = 0){
start_time= clock64();
}
__syncthreads();
*/
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
long long int value = A[index];
/*
//////////////////////////////////////////////sleep
long long int start_clock = clock64();
long long int clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
*/
//////////////////////////////////////////////loop
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value = value + threadIdx.x;
}
/*
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
if(blockIdx.x == 55){
int nsmid = 1;
asm("mov.u32 %0, %smid;" : "=r"(nsmid) );
printf("nsmid: %d\n", nsmid);
}
}
*/
//d_o[0] = clock_offset;
//////////////////////////////////////////////sleep
A[index] = value;
/*
__syncthreads();
/////////////////////////////////time
if(index = 0){
start_time= clock64();
time_interval = end_time - start_time;//////clock
}
//B[0] = time_interval;
*/
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
//plain managed
printf("###################\n#########################managed\n");
//////////////0.5gb to 64gb stride 0.5gb (1 * 4 * 1024)
for(long long int data_stride = 1 * 1 * 1024; data_stride <= 1 * 1 * 1024; data_stride = data_stride + 1 * 4 * 1024){/////512 is 4m, see what happens after 2m. 128 positions.
//for(long long int data_stride = 1 * 256 * 1024; data_stride <= 1 * 256 * 1024; data_stride = data_stride + 1 * 8 * 1024){
for(long long int block_num = 16; block_num <= 2048; block_num = block_num + 16){////////up to 64gb
//for(long long int block_num = 256; block_num <= 256; block_num = block_num + 16){////////up to 64gb
////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
for(long long int clock_count = 1; clock_count <= 1; clock_count = clock_count * 2){
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * block_num;
data_size = data_size * 512;
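// Worked example with the active loop bounds: data_stride = 1024 and block_num
// up to 2048 give data_size = 1024 * 2048 * 512 = 2^30 = 1073741824 long long
// elements, i.e. 8 GB per allocation at 8 bytes each, consistent with the
// "1073741824 = 8gb" entry in the index table commented above.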
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
//init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//long long int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(long long int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(long long int) * data_size, cudaMemcpyHostToDevice);
/*
///////////////////////////////////////////////////////////////////GPU data out
long long int *GPU_data_out;
//checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(long long int) * data_size));
checkCudaErrors(cudaMallocManaged(&GPU_data_out, sizeof(long long int) * data_size));/////////////using unified memory
*/
gpu_initialization<<<block_num, 512>>>(CPU_data_in, data_stride, data_size);///////////////1024 per block max
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
Page_visitor<<<block_num, 512>>>(CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 512 * 1 * 256 * 1024 = 32gb, 32 * 512 * 1 * 512 * 1024 = 64gb.
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
//checkCudaErrors(cudaFree(GPU_data_out));
}
printf("\n");
}
}
exit(EXIT_SUCCESS);
}
|
55fd2377d7bb6c49ffcd32ba99ab858723aa79ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
// includes, system
#include <string>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <math.h>
#include <time.h>
#include "layer.h"
#include "layer.hip"
struct mnist_data {
double data[28][28];
int label; //0-9
};
// set Layer
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_p = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
// static Layer l_f1 = Layer(6*6*6, 10, 36);
// static Layer l_f2 = Layer(36, 1, 10);
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
unsigned int dataToInt(char* c) {
unsigned int d = 0;
for (int i = 0; i < 4; i++) {
d <<= 8;
d |= (unsigned char)c[i];
}
return d;
}
int mnist_load(
const char *image_filename,
const char *label_filename,
mnist_data **data,
unsigned int *count)
{
char tmp[4];
unsigned char read_data[28*28];
unsigned int im, l, i, j, k, ic1, ic2, image_cnt, label_cnt;
FILE *ifp = fopen(image_filename, "rb");
FILE *lfp = fopen(label_filename, "rb");
if (!ifp || !lfp) {
printf("file not open");
if (ifp) fclose(ifp);
if (lfp) fclose(lfp);
return -1;
}
fread(tmp, 1, 4, ifp);
im = dataToInt(tmp);
fread(tmp, 1, 4, lfp);
l = dataToInt(tmp);
fread(tmp, 1, 4, ifp);
image_cnt = dataToInt(tmp);
fread(tmp, 1, 4, lfp);
label_cnt = dataToInt(tmp);
fread(tmp, 1, 4, ifp);
ic1 = dataToInt(tmp);
fread(tmp, 1, 4, ifp);
ic2 = dataToInt(tmp);
// printf("im, l, image_cnt, label_cnt, ic1, ic2 \n");
// printf("%d, %d, %d, %d, %d, %d \n", im, l, image_cnt, label_cnt, ic1, ic2);
if(im != 2051 || l != 2049 || image_cnt != label_cnt || ic1 != 28 || ic2 != 28){
printf("get wrong file");
fclose(ifp);
fclose(lfp);
return -2;
}
*count = image_cnt;
*data = (mnist_data *)malloc(sizeof(mnist_data) * image_cnt);
for (i = 0; i < image_cnt; i++) {
mnist_data *d = &(*data)[i];
fread(read_data, 1, 28*28, ifp);
for(j=0; j<28; j++){
for(k=0; k<28; k++)
d->data[j][k] = read_data[j*28+k]/255.0;
}
fread(tmp, 1, 1, lfp);
d->label = tmp[0]%10;
}
fclose(ifp);
fclose(lfp);
return 0;
}
static inline void loadData(){
clock_t t;
t = clock();
mnist_load("MNIST_data/train-images.idx3-ubyte", "MNIST_data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("MNIST_data/t10k-images.idx3-ubyte", "MNIST_data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
t = clock() - t;
float load_time = (float)t/CLOCKS_PER_SEC;
printf("loadData spend %.2f seconds \n", load_time);
}
static float forward(const double data[28][28]){
// printf("run forward\n");
float input[28][28];
for (int i = 0; i<28; i++){
for (int j = 0; j<28; j++){
input[i][j] = data[i][j];
// printf("%.2f ", data[i][j]);
}
// printf("\n");
}
l_input.clear();
l_c1.clear();
l_p.clear();
l_f.clear();
// printf("**************************************\n");
//example for convLayer 1:
l_input.setInput((float *)input);
// hipMemcpyToSymbol(conv_input, input, sizeof(float) * 28 * 28);
//printf("input image: %f\n", &l_input.output[0][0]);
//timer
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
int bz;
bz = ceil((float)24/TILE_WIDTH)*ceil((float)24/TILE_WIDTH);
dim3 gridDim(1, 6, bz);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
//constant memory test
// ConvLayerForward_Kernel<<<gridDim,blockDim>>>((float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6);
hipLaunchKernelGGL(( ConvLayerForward_Kernel_1), dim3(gridDim),dim3(blockDim), 0, 0, (float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6);
hipLaunchKernelGGL(( apply_sigmoid) , dim3(64),dim3(64), 0, 0, l_c1.preact, l_c1.output, l_c1.bytes);
// for pooling layer example:
dim3 gridDimPool(1, 1, 1);
dim3 blockDimPool(6, 6, 6);
hipLaunchKernelGGL(( PoolLayerForward_Kernel), dim3(gridDimPool),dim3(blockDimPool), 0, 0, (float (*)[24][24])l_c1.output, (float (*)[6][6])l_p.preact, (float (*)[4][4])l_p.weight, l_p.bias, 24, 24, 6, 4);
// AvgPoolLayerForward_Kernel<<<gridDimPool,blockDimPool>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_p.preact, 24, 24, 6, 4);
hipLaunchKernelGGL(( apply_sigmoid) , dim3(64),dim3(64), 0, 0, l_p.preact, l_p.output, l_p.bytes);
// for fully connected layer
dim3 gridDimfc(1, 1, 1);
dim3 blockDimfc(10, 1, 1);
hipLaunchKernelGGL(( FullyConLayerForward_kernel), dim3(gridDimfc),dim3(blockDimfc), 0, 0, (float (*)[6][6])l_p.output, (float (*)[6][6][6])l_f.weight, l_f.preact, l_f.bias, 1, 6, 10, 1, 10);
hipLaunchKernelGGL(( apply_sigmoid), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.output, l_f.bytes);
//end timer:
hipEventRecord(stop, 0);
hipEventSynchronize(stop); // after hipEventRecord
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
static float backward(){
//timer
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dim3 gridDimfc(1, 10, 1);
dim3 blockDimfc(6, 6, 6);
hipLaunchKernelGGL(( FullyConLayerBackward_kernel), dim3(gridDimfc), dim3(blockDimfc), 0, 0,
l_f.b_preact,
l_f.bias,
(float (*)[6][6][6]) l_f.weight,
(float (*)[6][6])l_p.output,
(float (*)[6][6])l_p.b_output);
dim3 gridDims(1, 1, 1);
dim3 blockDims(6, 6, 6);
hipLaunchKernelGGL(( PoolLayerBackward_Kernel), dim3(gridDims), dim3(blockDims), 0, 0,
(float (*)[6][6])l_p.preact,
(float (*)[6][6])l_p.b_output,
(float (*)[4][4])l_p.b_weight,
(float (*)[4][4])l_p.weight,
(float (*)[24][24])l_c1.output,
(float (*)[24][24])l_c1.b_output,
l_p.bias);
// AvgPoolLayerBackward_Kernel<<<gridDims, blockDims>>>(
// (float (*)[6][6])l_p.preact,
// (float (*)[24][24])l_c1.b_output,
// 4 );
dim3 gridDimc(1, 6, 1);
dim3 blockDimc(24, 24, 1);
hipLaunchKernelGGL(( ConvLayerBackward_Kernel), dim3(gridDimc), dim3(blockDimc), 0, 0,
(float (*)[24][24])l_c1.preact,
(float (*)[24][24])l_c1.b_output,
(float (*)[5][5])l_c1.weight,
(float (*)[28])l_input.output,
l_c1.bias);
hipEventRecord(stop, 0);
hipEventSynchronize(stop); // after hipEventRecord
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
static void learn(){
float time_taken = 0.0;
clock_t t;
t = clock();
for(int i=0; i< train_cnt; i++){
//for(int i=0; i<10; i++){
// printf("label: %d \n", train_set[i].label);
l_f.bp_clear();
l_p.bp_clear();
l_c1.bp_clear();
time_taken += forward(train_set[i].data);
hipLaunchKernelGGL(( loss_func), dim3(1), dim3(10), 0, 0, l_f.b_preact, l_f.output, train_set[i].label, 10);
time_taken += backward();
}
printf("time on GPU: %.5f seconds\n", time_taken / 1000);
t = clock() - t;
float cpu_time = (float)t/CLOCKS_PER_SEC;
printf("Total spend %.2f seconds \n", cpu_time);
}
static unsigned int classify(double data[28][28])
{
float res[10];
forward(data);
unsigned int max = 0;
hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
// hipMemcpy(res, l_f.b_preact, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
printf("Test Accuracy:: %.2f%%\n", 100 - ( double(error) / double(test_cnt) * 100.0));
}
int main(){
int epoch = 5;
printf("CNN CUDA version result: \n");
printf("Number of epoch: %d \n\n", epoch);
loadData();
for (int i = 0; i < epoch; i++){
printf("epoch: %d \n", i + 1);
learn();
test();
}
printf("finish\n");
return 0;
}
|
55fd2377d7bb6c49ffcd32ba99ab858723aa79ce.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
// includes, system
#include <string>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <math.h>
#include <time.h>
#include "layer.h"
#include "layer.cu"
struct mnist_data {
double data[28][28];
int label; //0-9
};
// set Layer
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_p = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
// static Layer l_f1 = Layer(6*6*6, 10, 36);
// static Layer l_f2 = Layer(36, 1, 10);
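// Layer shape bookkeeping, derived from the constructor arguments above:
//   input : 28 x 28 image                 -> 784 values
//   conv  : 5 x 5 kernels, 6 feature maps -> 28 - 5 + 1 = 24, giving 24 x 24 x 6
//   pool  : 4 x 4 window                  -> 24 / 4 = 6, giving 6 x 6 x 6
//   fc    : 6 * 6 * 6 = 216 inputs        -> 10 class scores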
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
unsigned int dataToInt(char* c) {
unsigned int d = 0;
for (int i = 0; i < 4; i++) {
d <<= 8;
d |= (unsigned char)c[i];
}
return d;
}
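// dataToInt() folds four big-endian bytes into one unsigned int; for example the
// IDX image-file header bytes 0x00 0x00 0x08 0x03 become 2051 and the label-file
// bytes 0x00 0x00 0x08 0x01 become 2049, the magic numbers checked in mnist_load() below.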
int mnist_load(
const char *image_filename,
const char *label_filename,
mnist_data **data,
unsigned int *count)
{
char tmp[4];
unsigned char read_data[28*28];
unsigned int im, l, i, j, k, ic1, ic2, image_cnt, label_cnt;
FILE *ifp = fopen(image_filename, "rb");
FILE *lfp = fopen(label_filename, "rb");
if (!ifp || !lfp) {
printf("file not open");
if (ifp) fclose(ifp);
if (lfp) fclose(lfp);
return -1;
}
fread(tmp, 1, 4, ifp);
im = dataToInt(tmp);
fread(tmp, 1, 4, lfp);
l = dataToInt(tmp);
fread(tmp, 1, 4, ifp);
image_cnt = dataToInt(tmp);
fread(tmp, 1, 4, lfp);
label_cnt = dataToInt(tmp);
fread(tmp, 1, 4, ifp);
ic1 = dataToInt(tmp);
fread(tmp, 1, 4, ifp);
ic2 = dataToInt(tmp);
// printf("im, l, image_cnt, label_cnt, ic1, ic2 \n");
// printf("%d, %d, %d, %d, %d, %d \n", im, l, image_cnt, label_cnt, ic1, ic2);
if(im != 2051 || l != 2049 || image_cnt != label_cnt || ic1 != 28 || ic2 != 28){
printf("get wrong file");
fclose(ifp);
fclose(lfp);
return -2;
}
*count = image_cnt;
*data = (mnist_data *)malloc(sizeof(mnist_data) * image_cnt);
for (i = 0; i < image_cnt; i++) {
mnist_data *d = &(*data)[i];
fread(read_data, 1, 28*28, ifp);
for(j=0; j<28; j++){
for(k=0; k<28; k++)
d->data[j][k] = read_data[j*28+k]/255.0;
}
fread(tmp, 1, 1, lfp);
d->label = tmp[0]%10;
}
fclose(ifp);
fclose(lfp);
return 0;
}
static inline void loadData(){
clock_t t;
t = clock();
mnist_load("MNIST_data/train-images.idx3-ubyte", "MNIST_data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("MNIST_data/t10k-images.idx3-ubyte", "MNIST_data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
t = clock() - t;
float load_time = (float)t/CLOCKS_PER_SEC;
printf("loadData spend %.2f seconds \n", load_time);
}
static float forward(const double data[28][28]){
// printf("run forward\n");
float input[28][28];
for (int i = 0; i<28; i++){
for (int j = 0; j<28; j++){
input[i][j] = data[i][j];
// printf("%.2f ", data[i][j]);
}
// printf("\n");
}
l_input.clear();
l_c1.clear();
l_p.clear();
l_f.clear();
// printf("**************************************\n");
//example for convLayer 1:
l_input.setInput((float *)input);
// cudaMemcpyToSymbol(conv_input, input, sizeof(float) * 28 * 28);
//printf("input image: %f\n", &l_input.output[0][0]);
//timer
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
int bz;
bz = ceil((float)24/TILE_WIDTH)*ceil((float)24/TILE_WIDTH);
dim3 gridDim(1, 6, bz);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
//constant memory test
// ConvLayerForward_Kernel<<<gridDim,blockDim>>>((float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6);
ConvLayerForward_Kernel_1<<<gridDim,blockDim>>>((float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6);
apply_sigmoid <<<64,64>>>(l_c1.preact, l_c1.output, l_c1.bytes);
// for pooling layer example:
dim3 gridDimPool(1, 1, 1);
dim3 blockDimPool(6, 6, 6);
PoolLayerForward_Kernel<<<gridDimPool,blockDimPool>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_p.preact, (float (*)[4][4])l_p.weight, l_p.bias, 24, 24, 6, 4);
// AvgPoolLayerForward_Kernel<<<gridDimPool,blockDimPool>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_p.preact, 24, 24, 6, 4);
apply_sigmoid <<<64,64>>>(l_p.preact, l_p.output, l_p.bytes);
// for fully connected layer
dim3 gridDimfc(1, 1, 1);
dim3 blockDimfc(10, 1, 1);
FullyConLayerForward_kernel<<<gridDimfc,blockDimfc>>>((float (*)[6][6])l_p.output, (float (*)[6][6][6])l_f.weight, l_f.preact, l_f.bias, 1, 6, 10, 1, 10);
apply_sigmoid<<<64, 64>>>(l_f.preact, l_f.output, l_f.bytes);
//end timer:
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop); // after cudaEventRecord
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
static float backward(){
//timer
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dim3 gridDimfc(1, 10, 1);
dim3 blockDimfc(6, 6, 6);
FullyConLayerBackward_kernel<<<gridDimfc, blockDimfc>>>(
l_f.b_preact,
l_f.bias,
(float (*)[6][6][6]) l_f.weight,
(float (*)[6][6])l_p.output,
(float (*)[6][6])l_p.b_output);
dim3 gridDims(1, 1, 1);
dim3 blockDims(6, 6, 6);
PoolLayerBackward_Kernel<<<gridDims, blockDims>>>(
(float (*)[6][6])l_p.preact,
(float (*)[6][6])l_p.b_output,
(float (*)[4][4])l_p.b_weight,
(float (*)[4][4])l_p.weight,
(float (*)[24][24])l_c1.output,
(float (*)[24][24])l_c1.b_output,
l_p.bias);
// AvgPoolLayerBackward_Kernel<<<gridDims, blockDims>>>(
// (float (*)[6][6])l_p.preact,
// (float (*)[24][24])l_c1.b_output,
// 4 );
dim3 gridDimc(1, 6, 1);
dim3 blockDimc(24, 24, 1);
ConvLayerBackward_Kernel<<<gridDimc, blockDimc>>>(
(float (*)[24][24])l_c1.preact,
(float (*)[24][24])l_c1.b_output,
(float (*)[5][5])l_c1.weight,
(float (*)[28])l_input.output,
l_c1.bias);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop); // after cudaEventRecord
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
static void learn(){
float time_taken = 0.0;
clock_t t;
t = clock();
for(int i=0; i< train_cnt; i++){
//for(int i=0; i<10; i++){
// printf("label: %d \n", train_set[i].label);
l_f.bp_clear();
l_p.bp_clear();
l_c1.bp_clear();
time_taken += forward(train_set[i].data);
loss_func<<<1, 10>>>(l_f.b_preact, l_f.output, train_set[i].label, 10);
time_taken += backward();
}
printf("time on GPU: %.5f seconds\n", time_taken / 1000);
t = clock() - t;
float cpu_time = (float)t/CLOCKS_PER_SEC;
printf("Total spend %.2f seconds \n", cpu_time);
}
static unsigned int classify(double data[28][28])
{
float res[10];
forward(data);
unsigned int max = 0;
cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
// cudaMemcpy(res, l_f.b_preact, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
printf("Test Accuracy:: %.2f%%\n", 100 - ( double(error) / double(test_cnt) * 100.0));
}
int main(){
int epoch = 5;
printf("CNN CUDA version result: \n");
printf("Number of epoch: %d \n\n", epoch);
loadData();
for (int i = 0; i < epoch; i++){
printf("epoch: %d \n", i + 1);
learn();
test();
}
printf("finish\n");
return 0;
}
|
292a40a4afd195c73fef7cac6d17d89ae8cf0dd7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to a 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd.
*/
static __device__
void zlacpy_full_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_lower_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_upper_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void zlacpy_full_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_lower_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_upper_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void zlacpy_full_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_lower_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_upper_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
/***************************************************************************//**
Purpose
-------
ZLACPY copies all or part of a two-dimensional matrix dA to another
matrix dB.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_zlacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use hipMemcpy or hipMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy_batched
*******************************************************************************/
extern "C" void
magmablas_zlacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( zlacpy_lower_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( zlacpy_upper_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray, ldda, dBarray, lddb );
}
else {
hipLaunchKernelGGL(( zlacpy_full_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray, ldda, dBarray, lddb );
}
}
|
292a40a4afd195c73fef7cac6d17d89ae8cf0dd7.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to a 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd.
*/
static __device__
void zlacpy_full_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_lower_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
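// Concrete example of the predicates above with BLK_X = BLK_Y = 64 (and n >= 128):
// the thread block at blockIdx = (3, 1) owns rows 192..255 and columns 64..127.
// Every row index satisfies ind >= iby + BLK_Y = 128, so `full` is true and the
// whole 64 x 64 tile lies strictly below the diagonal and takes the unrolled copy;
// a block that straddles the diagonal falls through to the guarded loop instead.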
/******************************************************************************/
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_upper_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void zlacpy_full_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_lower_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_upper_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void zlacpy_full_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_lower_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_upper_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
/***************************************************************************//**
Purpose
-------
ZLACPY copies all or part of a two-dimensional matrix dA to another
matrix dB.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_zlacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlacpy_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
zlacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlacpy_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
zlacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use cudaMemcpy or cudaMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
zlacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
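// Illustrative sketch (not part of MAGMA): how the super-block loops above size
// their grids. With BLK_X = BLK_Y = 64 and max_blocks = 65535, one super block
// spans 65535 * 64 = 4194240 rows or columns, matching the comment at the top of
// this file. The m and n values below are arbitrary assumptions for the example.
static inline magma_int_t zlacpy_grid_demo()
{
    const magma_int_t blk = 64;
    const magma_int_t super_NB_demo = 65535 * blk;               // 4194240
    const magma_int_t m = 6000000, n = 128;                      // assumed sizes
    magma_int_t super_x = magma_ceildiv( m, super_NB_demo );     // 2 super blocks down
    magma_int_t super_y = magma_ceildiv( n, super_NB_demo );     // 1 super block across
    magma_int_t total_blocks = 0;
    for (magma_int_t i = 0; i < super_x; ++i) {
        magma_int_t mm = (i == super_x-1 ? m % super_NB_demo : super_NB_demo);
        for (magma_int_t j = 0; j < super_y; ++j) {
            magma_int_t nn = (j == super_y-1 ? n % super_NB_demo : super_NB_demo);
            total_blocks += magma_ceildiv( mm, blk ) * magma_ceildiv( nn, blk );
        }
    }
    return total_blocks;   // number of 64-thread blocks a full MagmaFull copy would launch
}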
/***************************************************************************//**
Purpose
-------
ZLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy_batched
*******************************************************************************/
extern "C" void
magmablas_zlacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
zlacpy_lower_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
zlacpy_upper_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray, ldda, dBarray, lddb );
}
else {
zlacpy_full_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray, ldda, dBarray, lddb );
}
}
|
54f91aa8272f06a0dd64664d1535e63a474f9fa9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<hip/hip_runtime.h>
#include<assert.h>
#include<vector>
#include<algorithm>
#include"utils.h"
#include"scan.h"
//! Function Prototypes
void binary_radix_sort(std::vector<int> & in, std::vector<int> & out);
void seq_bit_remap(const std::vector<int> &to_remap, std::vector<int> &remapped,
const std::vector<int> &block_totals, const std::vector<int> &block_offsets,
const dim3 gridDims, const dim3 blockDims);
__global__ void kernel_radix_sort(const int * d_to_sort, int * d_sorted);
float cuda_parallel_radix_sort(std::vector<int> & to_sort, std::vector<int> & sorted);
#define BLOCK_SIZE (1 << 10)
int main(void) {
reset_cuda_devs();
srand( 0 /*time(NULL)*/);
printf("Vector Size %i\n", BLOCK_SIZE);
printf("Estimated Memory Usage is %f MB\n", (float) (BLOCK_SIZE * sizeof(int)) / 1e6 * 4.0);
printf("Allocating four vectors\n");
std::vector<int> seq_to_sort(BLOCK_SIZE, 0);
std::vector<int> sorted(BLOCK_SIZE, 0);
std::vector<int> gpu_sorted(BLOCK_SIZE, 0);
incremental_fill(seq_to_sort);
shuffle(seq_to_sort);
// First sort using the sequential version of radix sort.
printf("Performing Sequential Sort\n");
clock_t t = clock();
binary_radix_sort(seq_to_sort, sorted);
t = clock() - t;
float t_sec = (float)t / (float) CLOCKS_PER_SEC;
if(t_sec < 1) {
printf("Done. Took %0.2f ms\n", t_sec * 1e3);
} else {
printf("Done. Took %0.2f s\n", t_sec );
}
// Implement gpu radix sort algorithm.
printf("Performing Parallel Sort\n");
printf("\tTo analyze the performance run 'nvprof ./radix_sort'. \n");
float cuda_runtime_ms = cuda_parallel_radix_sort(seq_to_sort, gpu_sorted);
if(cuda_runtime_ms < 1000.0) {
printf("Done. Took %0.2f ms.\n", cuda_runtime_ms);
} else {
printf("Done. Took %0.2f s.\n", cuda_runtime_ms / 1000.0);
}
int miss_index = equal(sorted, gpu_sorted);
if( miss_index != -1 ) {
printf("Expected %i got %i at index %i\n", sorted[miss_index], gpu_sorted[miss_index], miss_index);
} else {
printf("Success!\n");
}
return 0;
}
/// Parallel version of the radix sort kernel.
/// This modified version of the parallel sort algorithm will only perform a single pass based on the
/// exponent passed in.
///
/// \param[in, out] d_in: The unsorted set of elements.
/// \param[in, out] d_out: The sorted set of elements.
__global__ void kernel_radix_sort(const int * d_to_sort, int * d_sorted) {
// Allocate the necessary static shared memory.
// Hint: Doesn't need to be larger than the block size.
    // Copy to the shared memory.
    // Loop through all of the bits of the numbers to sort, i.e. 32 bits for an integer.
// Calculate the predicate array for the target bits in the sequence of numbers.
// Perform a scan on the predicate shared array.
// Construct the scatter indexes from the scanned array.
// Copy the elements from the unsorted shared array to the sorted shared array.
// Swap the shared unsorted and sorted array.
//End Loop
    // Copy from the sorted shared array back to the global output array.
}
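/// --- Editor's addition: an illustrative sketch, NOT the assignment's intended
/// solution. It shows the shape of a single bit-pass of the block-local radix
/// sort outlined in the comments above: build a predicate for the inspected
/// bit, scan it, then scatter. The kernel name, the naive Hillis-Steele scan
/// and the "one block of blockDim.x elements (<= BLOCK_SIZE)" layout are all
/// assumptions of this sketch.
__global__ void sketch_radix_bit_pass(const int * d_in, int * d_out, int bit) {
    __shared__ int pred[BLOCK_SIZE];     // 1 where the inspected bit is 0
    __shared__ int scanned[BLOCK_SIZE];  // inclusive scan of pred
    const int tid = threadIdx.x;
    const int val = d_in[tid];
    pred[tid] = ((val >> bit) & 1) ? 0 : 1;
    __syncthreads();
    // naive Hillis-Steele inclusive scan of the predicate (O(n log n) work)
    scanned[tid] = pred[tid];
    __syncthreads();
    for (int offset = 1; offset < blockDim.x; offset <<= 1) {
        int add = (tid >= offset) ? scanned[tid - offset] : 0;
        __syncthreads();
        scanned[tid] += add;
        __syncthreads();
    }
    const int total_zeros = scanned[blockDim.x - 1];
    // scatter: zero-bit elements keep their rank among the zeros,
    // one-bit elements are placed after all of the zeros (stable partition)
    int dst;
    if (pred[tid]) {
        dst = scanned[tid] - 1;                   // exclusive rank among the zeros
    } else {
        dst = total_zeros + (tid - scanned[tid]); // exclusive rank among the ones
    }
    d_out[dst] = val;
}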
/*! \brief Parallel Radix Sort using CUDA.
*
* Calls the necessary functions to perform a GPGPU based radix sort using the CUDA API.
* >>> Requires the definition of BLOCK_SIZE in the source.
*
 * @param[in] to_sort The sequence to sort.
 * @param[out] sorted The sorted sequence.
*
* @returns The execution time in milliseconds.
*/
float cuda_parallel_radix_sort(std::vector<int> & to_sort, std::vector<int> & sorted) {
// For timing
hipEvent_t start, stop;
float cuda_elapsed_time_ms = 0.0;
// Initialize and begin the timers
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
// Allocate the memory on the device
// Copy from the host to the device.
// Calculate the block and grid dimensions.
// Sort the sequence in parallel.
// call the kernel.
checkCudaErrors(hipGetLastError()); // call this after executing a kernel.
// Copy from the device to the host.
// Free up the device.
// Stop the timers
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&cuda_elapsed_time_ms, start, stop));
return cuda_elapsed_time_ms;
}
/*! \brief Sequential Bit Remap
*
* Serial implementation of the bit remap kernel.
*
 * @param[in] to_remap The sequence to remap.
 * @param[out] remapped The remapped output sequence.
 * @param[in] block_totals Per-block count of elements belonging to the low partition.
 * @param[in] block_offsets Global start offsets of each block's low and high partitions.
*/
void seq_bit_remap(const std::vector<int> &to_remap, std::vector<int> &remapped,
const std::vector<int> &block_totals, const std::vector<int> &block_offsets,
const dim3 gridDims, const dim3 blockDims) {
for(unsigned int bIdX = 0; bIdX < (unsigned int) gridDims.x; ++bIdX) {
for(unsigned int tIdX = 0; tIdX < (unsigned int) blockDims.x; ++tIdX) {
unsigned int gIdX = tIdX + bIdX * (unsigned int) blockDims.x;
if(tIdX < (unsigned int) block_totals[bIdX]) {
unsigned int mapping = tIdX + (unsigned int) block_offsets[bIdX];
remapped[mapping] = to_remap[gIdX];
} else {
unsigned int mapping = (tIdX - block_totals[bIdX]) + (unsigned int) block_offsets[gridDims.x + bIdX];
remapped[mapping] = to_remap[gIdX];
}
}
}
}
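/*! Worked example (editor's addition): a hedged reading of seq_bit_remap above.
 * From the loop, block_totals[b] appears to count the elements of block b that
 * belong to the low partition, while block_offsets[b] and
 * block_offsets[gridDims.x + b] appear to be the global start of block b's low
 * and high partitions. With gridDims.x = 2 blocks of blockDims.x = 4 elements:
 *
 *   to_remap      = { 0,1,2,3,  4,5,6,7 }  // each block already partitioned low-then-high
 *   block_totals  = { 3, 1 }               // 3 low elements in block 0, 1 in block 1
 *   block_offsets = { 0, 3,  4, 5 }        // lows start at 0 and 3; highs start at 4 and 5
 *   remapped      = { 0,1,2,4,  3,5,6,7 }
 */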
/*! \brief Binary version of radix sort.
*
 * Binary implementation of radix sort. The function is structured so that it
 * is easier to compare against the CUDA implementation.
*
* @param[in] the sequence to sort.
* @param[out] the sorted sequence.
*/
void binary_radix_sort(std::vector<int> & in, std::vector<int> & out) {
out = in;
std::vector<int> tmp(in.size(), 0);
for(unsigned int exponent = 0; exponent < sizeof(int) * 8; ++exponent) {
int i_n = 0;
for(unsigned int i = 0; i < tmp.size(); ++i) {
if(!(out[i] & (1 << exponent))) {
tmp[i_n] = out[i];
++i_n;
}
}
for(unsigned int i = 0; i < tmp.size(); ++i) {
if(out[i] & (1 << exponent)) {
tmp[i_n] = out[i];
++i_n;
}
}
out = tmp;
}
}
|
54f91aa8272f06a0dd64664d1535e63a474f9fa9.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda_runtime.h>
#include<assert.h>
#include<vector>
#include<algorithm>
#include"utils.h"
#include"scan.h"
//! Function Prototypes
void binary_radix_sort(std::vector<int> & in, std::vector<int> & out);
void seq_bit_remap(const std::vector<int> &to_remap, std::vector<int> &remapped,
const std::vector<int> &block_totals, const std::vector<int> &block_offsets,
const dim3 gridDims, const dim3 blockDims);
__global__ void kernel_radix_sort(const int * d_to_sort, int * d_sorted);
float cuda_parallel_radix_sort(std::vector<int> & to_sort, std::vector<int> & sorted);
#define BLOCK_SIZE (1 << 10)
int main(void) {
reset_cuda_devs();
srand( 0 /*time(NULL)*/);
printf("Vector Size %i\n", BLOCK_SIZE);
printf("Estimated Memory Usage is %f MB\n", (float) (BLOCK_SIZE * sizeof(int)) / 1e6 * 4.0);
printf("Allocating four vectors\n");
std::vector<int> seq_to_sort(BLOCK_SIZE, 0);
std::vector<int> sorted(BLOCK_SIZE, 0);
std::vector<int> gpu_sorted(BLOCK_SIZE, 0);
incremental_fill(seq_to_sort);
shuffle(seq_to_sort);
// First sort using the sequential version of radix sort.
printf("Performing Sequential Sort\n");
clock_t t = clock();
binary_radix_sort(seq_to_sort, sorted);
t = clock() - t;
float t_sec = (float)t / (float) CLOCKS_PER_SEC;
if(t_sec < 1) {
printf("Done. Took %0.2f ms\n", t_sec * 1e3);
} else {
printf("Done. Took %0.2f s\n", t_sec );
}
// Implement gpu radix sort algorithm.
printf("Performing Parallel Sort\n");
printf("\tTo analyze the performance run 'nvprof ./radix_sort'. \n");
float cuda_runtime_ms = cuda_parallel_radix_sort(seq_to_sort, gpu_sorted);
if(cuda_runtime_ms < 1000.0) {
printf("Done. Took %0.2f ms.\n", cuda_runtime_ms);
} else {
printf("Done. Took %0.2f s.\n", cuda_runtime_ms / 1000.0);
}
int miss_index = equal(sorted, gpu_sorted);
if( miss_index != -1 ) {
printf("Expected %i got %i at index %i\n", sorted[miss_index], gpu_sorted[miss_index], miss_index);
} else {
printf("Success!\n");
}
return 0;
}
/// Parallel version of the radix sort kernel.
/// This modified version of the parallel sort algorithm will only perform a single pass based on the
/// exponent passed in.
///
/// \param[in, out] d_in: The unsorted set of elements.
/// \param[in, out] d_out: The sorted set of elements.
__global__ void kernel_radix_sort(const int * d_to_sort, int * d_sorted) {
// Allocate the necessary static shared memory.
// Hint: Doesn't need to be larger than the block size.
    // Copy to the shared memory.
    // Loop through all of the bits of the numbers to sort, i.e. 32 bits for an integer.
// Calculate the predicate array for the target bits in the sequence of numbers.
// Perform a scan on the predicate shared array.
// Construct the scatter indexes from the scanned array.
// Copy the elements from the unsorted shared array to the sorted shared array.
// Swap the shared unsorted and sorted array.
//End Loop
    // Copy from the sorted shared array back to the global output array.
}
/*! \brief Parallel Radix Sort using CUDA.
*
* Calls the necessary functions to perform a GPGPU based radix sort using the CUDA API.
* >>> Requires the definition of BLOCK_SIZE in the source.
*
 * @param[in] to_sort The sequence to sort.
 * @param[out] sorted The sorted sequence.
*
* @returns The execution time in milliseconds.
*/
float cuda_parallel_radix_sort(std::vector<int> & to_sort, std::vector<int> & sorted) {
// For timing
cudaEvent_t start, stop;
float cuda_elapsed_time_ms = 0.0;
// Initialize and begin the timers
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
// Allocate the memory on the device
// Copy from the host to the device.
// Calculate the block and grid dimensions.
// Sort the sequence in parallel.
// call the kernel.
checkCudaErrors(cudaGetLastError()); // call this after executing a kernel.
// Copy from the device to the host.
// Free up the device.
// Stop the timers
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&cuda_elapsed_time_ms, start, stop));
return cuda_elapsed_time_ms;
}
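/*! Illustrative sketch (editor's addition): one hedged way the commented host
 * steps above could be filled in. checkCudaErrors comes from utils.h in this
 * project; the single-block launch of BLOCK_SIZE threads mirrors the
 * BLOCK_SIZE-sized input and is an assumption of this sketch, not a requirement.
 */
static float sketch_parallel_radix_sort(const std::vector<int> & to_sort, std::vector<int> & sorted) {
    const size_t bytes = to_sort.size() * sizeof(int);
    int *d_in = NULL, *d_out = NULL;
    checkCudaErrors(cudaMalloc((void**)&d_in, bytes));   // allocate device buffers
    checkCudaErrors(cudaMalloc((void**)&d_out, bytes));
    checkCudaErrors(cudaMemcpy(d_in, to_sort.data(), bytes, cudaMemcpyHostToDevice));
    cudaEvent_t start, stop;
    float ms = 0.0f;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaEventRecord(start));
    kernel_radix_sort<<<1, BLOCK_SIZE>>>(d_in, d_out);   // sort one block's worth of data
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaEventRecord(stop));
    checkCudaErrors(cudaEventSynchronize(stop));
    checkCudaErrors(cudaEventElapsedTime(&ms, start, stop));
    checkCudaErrors(cudaMemcpy(sorted.data(), d_out, bytes, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_in));
    checkCudaErrors(cudaFree(d_out));
    return ms;
}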
/*! \brief Sequential Bit Remap
*
* Serial implementation of the bit remap kernel.
*
 * @param[in] to_remap The sequence to remap.
 * @param[out] remapped The remapped output sequence.
 * @param[in] block_totals Per-block count of elements belonging to the low partition.
 * @param[in] block_offsets Global start offsets of each block's low and high partitions.
*/
void seq_bit_remap(const std::vector<int> &to_remap, std::vector<int> &remapped,
const std::vector<int> &block_totals, const std::vector<int> &block_offsets,
const dim3 gridDims, const dim3 blockDims) {
for(unsigned int bIdX = 0; bIdX < (unsigned int) gridDims.x; ++bIdX) {
for(unsigned int tIdX = 0; tIdX < (unsigned int) blockDims.x; ++tIdX) {
unsigned int gIdX = tIdX + bIdX * (unsigned int) blockDims.x;
if(tIdX < (unsigned int) block_totals[bIdX]) {
unsigned int mapping = tIdX + (unsigned int) block_offsets[bIdX];
remapped[mapping] = to_remap[gIdX];
} else {
unsigned int mapping = (tIdX - block_totals[bIdX]) + (unsigned int) block_offsets[gridDims.x + bIdX];
remapped[mapping] = to_remap[gIdX];
}
}
}
}
/*! \brief Binary version of radix sort.
*
 * Binary implementation of radix sort. The function is structured so that it
 * is easier to compare against the CUDA implementation.
*
* @param[in] the sequence to sort.
* @param[out] the sorted sequence.
*/
void binary_radix_sort(std::vector<int> & in, std::vector<int> & out) {
out = in;
std::vector<int> tmp(in.size(), 0);
for(unsigned int exponent = 0; exponent < sizeof(int) * 8; ++exponent) {
int i_n = 0;
for(unsigned int i = 0; i < tmp.size(); ++i) {
if(!(out[i] & (1 << exponent))) {
tmp[i_n] = out[i];
++i_n;
}
}
for(unsigned int i = 0; i < tmp.size(); ++i) {
if(out[i] & (1 << exponent)) {
tmp[i_n] = out[i];
++i_n;
}
}
out = tmp;
}
}
|
8ce0ba2f20a73c2a7cc798ffb802ffcb9852dbcf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <Windows.h>
#include <iostream>
using namespace std;
//B. One thread per element
__global__
void matrixAddKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n*n) C[i] = A[i] + B[i];
}
//C. One thread per row
__global__
void matrixAddKernel2(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n)
{
for (int j = i * n; j < i * n + n; j++)
C[j] = A[j] + B[j];
}
}
//D. One thread per column
__global__
void matrixAddKernel3(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i<n)
for (int j = i; j < n*n; j += n)
C[j] = A[j] + B[j];
}
void matrixAdd(float* A, float* B, float* C, int n) {
int size = n * n * sizeof(float);
float *d_A, *d_B, *d_C;
    ///A. Allocate memory, copy input data to the device
hipMalloc((void **)&d_A, size);
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMalloc((void **)&d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **)&d_C, size);
    ///Kernel launch
    dim3 DimGrid ((n-1)/256+1,1,1);
    dim3 DimBlock (256,1,1);
    hipLaunchKernelGGL(( matrixAddKernel) , dim3(DimGrid),dim3(DimBlock) , 0, 0, d_A, d_B, d_C, n);
    /// alternative: ceil((n*n) / 256.0), 256
    ///Transfer output data to the host
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    ///Free memory
    hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
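///Editor's addition: matrixAddKernel2 (one thread per row) and matrixAddKernel3
///(one thread per column) are defined above but never launched. A hedged sketch
///of how they could be driven; only n threads are needed instead of n*n.
void matrixAddByRowOrColumn(float* A, float* B, float* C, int n, bool byRow) {
    int size = n * n * sizeof(float);
    float *d_A, *d_B, *d_C;
    hipMalloc((void **)&d_A, size);
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    hipMalloc((void **)&d_B, size);
    hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
    hipMalloc((void **)&d_C, size);
    dim3 DimGrid ((n-1)/256+1,1,1);
    dim3 DimBlock (256,1,1);
    if (byRow)
        hipLaunchKernelGGL(( matrixAddKernel2) , dim3(DimGrid),dim3(DimBlock) , 0, 0, d_A, d_B, d_C, n);
    else
        hipLaunchKernelGGL(( matrixAddKernel3) , dim3(DimGrid),dim3(DimBlock) , 0, 0, d_A, d_B, d_C, n);
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    hipFree(d_A); hipFree(d_B); hipFree(d_C);
}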
void mostrarM(float* matriz, float fila, float columna)
{
for (int x = 0; x < fila; x++)
{
for (int y = 0; y < columna; y++)
{
int puesto = x*columna + y;
printf("%3.0f ", matriz[puesto]);
}
printf("\n");
}
}
int main() {
int fila;
cout<<"ingrese dimensiones: ";
cin>>fila;
printf(" = \n");
float* A = (float*)malloc(fila*fila*sizeof(float));
float* B = (float*)malloc(fila*fila*sizeof(float));
float* C = (float*)malloc(fila*fila*sizeof(float));
for (int i = 0; i < fila*fila; i++)
{
A[i] = i;
B[i] = i*2;
}
    ///display
mostrarM(A, fila, fila);
printf(" + \n");
mostrarM(B, fila, fila);
printf(" = \n");
    ///operations
matrixAdd(A, B, C, fila);
mostrarM(C, fila, fila);
system("PAUSE");
exit(0);
}
|
8ce0ba2f20a73c2a7cc798ffb802ffcb9852dbcf.cu
|
#include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <Windows.h>
#include <iostream>
using namespace std;
//B. One thread per element
__global__
void matrixAddKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n*n) C[i] = A[i] + B[i];
}
//C. One thread per row
__global__
void matrixAddKernel2(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n)
{
for (int j = i * n; j < i * n + n; j++)
C[j] = A[j] + B[j];
}
}
//D. One thread per column
__global__
void matrixAddKernel3(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i<n)
for (int j = i; j < n*n; j += n)
C[j] = A[j] + B[j];
}
void matrixAdd(float* A, float* B, float* C, int n) {
int size = n * n * sizeof(float);
float *d_A, *d_B, *d_C;
    ///A. Allocate memory, copy input data to the device
cudaMalloc((void **)&d_A, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_C, size);
    ///Kernel launch
    dim3 DimGrid ((n-1)/256+1,1,1);
    dim3 DimBlock (256,1,1);
    matrixAddKernel <<<DimGrid,DimBlock >>> (d_A, d_B, d_C, n);
    /// alternative: ceil((n*n) / 256.0), 256
    ///Transfer output data to the host
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    ///Free memory
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
void mostrarM(float* matriz, float fila, float columna)
{
for (int x = 0; x < fila; x++)
{
for (int y = 0; y < columna; y++)
{
int puesto = x*columna + y;
printf("%3.0f ", matriz[puesto]);
}
printf("\n");
}
}
int main() {
int fila;
cout<<"ingrese dimensiones: ";
cin>>fila;
printf(" = \n");
float* A = (float*)malloc(fila*fila*sizeof(float));
float* B = (float*)malloc(fila*fila*sizeof(float));
float* C = (float*)malloc(fila*fila*sizeof(float));
for (int i = 0; i < fila*fila; i++)
{
A[i] = i;
B[i] = i*2;
}
    ///display
mostrarM(A, fila, fila);
printf(" + \n");
mostrarM(B, fila, fila);
printf(" = \n");
    ///operations
matrixAdd(A, B, C, fila);
mostrarM(C, fila, fila);
system("PAUSE");
exit(0);
}
|
ff6fe349cbcdfb26a49f03040e3b327ff0ff1ce4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void intArrayAdd(int *size, const int *input, int *output, const int *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
    // add the free array to this thread's slice of the int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
|
ff6fe349cbcdfb26a49f03040e3b327ff0ff1ce4.cu
|
#include "includes.h"
__global__ void intArrayAdd(int *size, const int *input, int *output, const int *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
    // add the free array to this thread's slice of the int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
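// --- Usage sketch (editor's addition): the kernel above takes `size` and
// `length` as device pointers, so even the scalar arguments must be copied to
// the GPU. A hedged host-side driver, assuming the standard CUDA runtime API
// is visible through includes.h; the launch configuration is illustrative.
void launchIntArrayAdd(const int *h_in, const int *h_free, int *h_out, int size, int length) {
    int *d_size, *d_length, *d_in, *d_out, *d_free;
    cudaMalloc((void**)&d_size,   sizeof(int));
    cudaMalloc((void**)&d_length, sizeof(int));
    cudaMalloc((void**)&d_in,     (size_t)size * length * sizeof(int));
    cudaMalloc((void**)&d_out,    (size_t)size * length * sizeof(int));
    cudaMalloc((void**)&d_free,   (size_t)length * sizeof(int));
    cudaMemcpy(d_size,   &size,   sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_length, &length, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_in,   h_in,   (size_t)size * length * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_free, h_free, (size_t)length * sizeof(int), cudaMemcpyHostToDevice);
    int threads = 256;
    int blocks  = (size + threads - 1) / threads;   // one thread per int array
    intArrayAdd<<<blocks, threads>>>(d_size, d_in, d_out, d_free, d_length);
    cudaMemcpy(h_out, d_out, (size_t)size * length * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_size); cudaFree(d_length); cudaFree(d_in); cudaFree(d_out); cudaFree(d_free);
}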
|
4e915555ddd2db7d958d82978be89d712e5c7abb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "rocblas.h"
#include "../debug.h"
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
#define THREADS_PER_BLOCK_X 16
#define THREADS_PER_BLOCK_Y 16
/* naive GPU kernel where each element of C is computed by a single thread */
__global__ void GPU_naive( const int m, double const * const a, double const * const b, double * const c )
{
/* determine my thread's row and col indices in the global C matrix */
const int myrow = FIXME
const int mycol = FIXME
/* if my row and col are in the C matrix, then calculate that value of C */
if( myrow < m && mycol < m )
{
register double temp = 0.0;
/* insert correct index code here */
for( int k = 0; k < m; k++ )
{
temp += a[INDX( FIXME, FIXME, m )] * b[INDX( FIXME, FIXME, m )];
} /* end for */
/* insert index code to write the output to the C matrix */
c[INDX( FIXME, FIXME, m )] = temp;
} /* end if */
return;
} /* end GPU_naive */
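/* --- Editor's sketch (addition, not the official solution of this exercise):
   one possible way the FIXME placeholders above could be completed. Mapping
   threadIdx.x to rows and threadIdx.y to columns is a choice, and INDX is the
   column-major macro defined above. */
__global__ void GPU_naive_completed( const int m, double const * const a, double const * const b, double * const c )
{
    const int myrow = blockDim.x * blockIdx.x + threadIdx.x;
    const int mycol = blockDim.y * blockIdx.y + threadIdx.y;
    if( myrow < m && mycol < m )
    {
        double temp = 0.0;
        for( int k = 0; k < m; k++ )
        {
            temp += a[INDX( myrow, k, m )] * b[INDX( k, mycol, m )];
        } /* end for */
        c[INDX( myrow, mycol, m )] = temp;
    } /* end if */
    return;
} /* end GPU_naive_completed */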
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
checkCUDA( hipMalloc( (void **)&d_a, numbytes ) );
checkCUDA( hipMalloc( (void **)&d_b, numbytes ) );
checkCUDA( hipMalloc( (void **)&d_c, numbytes ) );
/* copy a and b to device */
checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) );
hipblasHandle_t handle;
checkCUBLAS( hipblasCreate( &handle ) );
double alpha = 1.0;
double beta = 0.0;
/* start timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* call CUBLAS dgemm */
checkCUBLAS(
hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size )
);
/* stop timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
checkCUDA( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) );
/* reset C on device to zero */
checkCUDA( hipMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( size / THREADS_PER_BLOCK_X + 1,
size / THREADS_PER_BLOCK_Y + 1, 1 );
/* start timers */
checkCUDA( hipEventRecord( start, 0 ) );
/* call GPU_naive */
hipLaunchKernelGGL(( GPU_naive), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c );
checkKERNEL()
/* stop timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
checkCUDA( hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ) );
checkCUBLAS( hipblasDestroy( handle ) );
checkCUDA( hipEventDestroy( start ) );
checkCUDA( hipEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("FAIL\n");
else printf("PASS\n");
/* cleanup */
checkCUDA( hipFree( d_a ) );
checkCUDA( hipFree( d_b ) );
checkCUDA( hipFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
checkCUDA( hipDeviceReset() );
return 0;
}
|
4e915555ddd2db7d958d82978be89d712e5c7abb.cu
|
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "cublas_v2.h"
#include "../debug.h"
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
#define THREADS_PER_BLOCK_X 16
#define THREADS_PER_BLOCK_Y 16
/* naive GPU kernel where each element of C is computed by a single thread */
__global__ void GPU_naive( const int m, double const * const a, double const * const b, double * const c )
{
/* determine my thread's row and col indices in the global C matrix */
const int myrow = FIXME
const int mycol = FIXME
/* if my row and col are in the C matrix, then calculate that value of C */
if( myrow < m && mycol < m )
{
register double temp = 0.0;
/* insert correct index code here */
for( int k = 0; k < m; k++ )
{
temp += a[INDX( FIXME, FIXME, m )] * b[INDX( FIXME, FIXME, m )];
} /* end for */
/* insert index code to write the output to the C matrix */
c[INDX( FIXME, FIXME, m )] = temp;
} /* end if */
return;
} /* end GPU_naive */
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
checkCUDA( cudaMalloc( (void **)&d_a, numbytes ) );
checkCUDA( cudaMalloc( (void **)&d_b, numbytes ) );
checkCUDA( cudaMalloc( (void **)&d_c, numbytes ) );
/* copy a and b to device */
checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) );
cublasHandle_t handle;
checkCUBLAS( cublasCreate( &handle ) );
double alpha = 1.0;
double beta = 0.0;
/* start timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* call CUBLAS dgemm */
checkCUBLAS(
cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size )
);
/* stop timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
checkCUDA( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) );
/* reset C on device to zero */
checkCUDA( cudaMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( size / THREADS_PER_BLOCK_X + 1,
size / THREADS_PER_BLOCK_Y + 1, 1 );
/* start timers */
checkCUDA( cudaEventRecord( start, 0 ) );
/* call GPU_naive */
GPU_naive<<< blocks, threads >>> ( size, d_a, d_b, d_c );
checkKERNEL()
/* stop timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
checkCUDA( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) );
checkCUBLAS( cublasDestroy( handle ) );
checkCUDA( cudaEventDestroy( start ) );
checkCUDA( cudaEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("FAIL\n");
else printf("PASS\n");
/* cleanup */
checkCUDA( cudaFree( d_a ) );
checkCUDA( cudaFree( d_b ) );
checkCUDA( cudaFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
checkCUDA( cudaDeviceReset() );
return 0;
}
|
3e2b6dfa448db2575b45338bf9c59038ae9c1eee.hip
|
// !!! This is a file automatically generated by hipify!!!
// Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p1.cu -o assignment5-p1
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include <iostream>
#include <new>
#include <sys/time.h>
#define THRESHOLD (0.000001)
#define SIZE1 8192
#define SIZE2 (SIZE1+8)
#define ITER 100
using std::cerr;
using std::cout;
using std::endl;
__global__ void kernel1(double* d_k1_in) {
// TODO: Fill in
int indexcomb = blockDim.x * blockIdx.x + threadIdx.x;
int j = indexcomb;
if(j<(SIZE1-1)){
for(int k=0;k<ITER;k++){
for(int i=1;i<(SIZE1-1);i++){
double temp=0.0;
temp = d_k1_in[(i - 1)*SIZE1 + j + 1] + d_k1_in[i*SIZE1 + j+1];
temp += d_k1_in[(i+1)*SIZE1 + j+1];
d_k1_in[i*SIZE1 + j+1] = temp;
}
}
}
}
__global__ void kernel2(double* d_k2_in) {
// TODO: Fill in
int indexcomb = blockDim.x * blockIdx.x + threadIdx.x;
int j = indexcomb;
if(j<(SIZE2-1)){
for(int k=0;k<ITER;k++){
int i=1;
//loop unrolled over 4 ways
for(;i+3<(SIZE2-1);i+=4){
double temp=d_k2_in[(i - 1)*SIZE2 + j + 1] + d_k2_in[i*SIZE2 + j+1];
temp += d_k2_in[(i+1)*SIZE2 + j+1];
d_k2_in[i*SIZE2 + j+1] = temp;
temp=d_k2_in[(i)*SIZE2 + j + 1] + d_k2_in[(i+1)*SIZE2 + j+1];
temp += d_k2_in[(i+2)*SIZE2 + j+1];
d_k2_in[(i+1)*SIZE2 + j+1] = temp;
temp=d_k2_in[(i+1)*SIZE2 + j + 1] + d_k2_in[(i+2)*SIZE2 + j+1];
temp += d_k2_in[(i+3)*SIZE2 + j+1];
d_k2_in[(i+2)*SIZE2 + j+1] = temp;
temp=d_k2_in[(i+2)*SIZE2 + j + 1] + d_k2_in[(i+3)*SIZE2 + j+1];
temp += d_k2_in[(i+4)*SIZE2 + j+1];
d_k2_in[(i+3)*SIZE2 + j+1] = temp;
}
for(;i<(SIZE2-1);i++){
double temp=d_k2_in[(i - 1)*SIZE2 + j + 1] + d_k2_in[i*SIZE2 + j+1];
temp += d_k2_in[(i+1)*SIZE2 + j+1];
d_k2_in[i*SIZE2 + j+1] = temp;
}
}
}
}
__host__ void serial(double** h_ser_in) {
for (int k = 0; k < ITER; k++) {
for (int i = 1; i < (SIZE1 - 1); i++) {
for (int j = 0; j < (SIZE1 - 1); j++) {
h_ser_in[i][j + 1] =
(h_ser_in[i - 1][j + 1] + h_ser_in[i][j + 1] + h_ser_in[i + 1][j + 1]);
}
}
}
}
__host__ void check_result(double** w_ref, double** w_opt, uint64_t size) {
double maxdiff = 0.0, this_diff = 0.0;
int numdiffs = 0;
for (uint64_t i = 0; i < size; i++) {
for (uint64_t j = 0; j < size; j++) {
this_diff = w_ref[i][j] - w_opt[i][j];
if (fabs(this_diff) > THRESHOLD) {
numdiffs++;
if (this_diff > maxdiff)
maxdiff = this_diff;
}
}
}
if (numdiffs > 0) {
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff
<< endl;
} else {
cout << "No differences found between base and test versions\n";
}
}
__host__ double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
int main() {
double** h_ser_in = new double*[SIZE1];
double** h_ser_out = new double*[SIZE1];
double** h_k1_in = new double*[SIZE1];
double** h_k1_out = new double*[SIZE1];
for (int i = 0; i < SIZE1; i++) {
h_ser_in[i] = new double[SIZE1];
h_ser_out[i] = new double[SIZE1];
h_k1_in[i] = new double[SIZE1];
h_k1_out[i] = new double[SIZE1];
}
for (int i = 0; i < SIZE1; i++) {
for (int j = 0; j < SIZE1; j++) {
h_ser_in[i][j] = 1;
h_ser_out[i][j] = 0;
h_k1_in[i][j] = 1;
h_k1_out[i][j] = 0;
}
}
double** h_k2_in = new double*[SIZE2];
double** h_k2_out = new double*[SIZE2];
for (int i = 0; i < SIZE2; i++) {
h_k2_in[i] = new double[SIZE2];
h_k2_out[i] = new double[SIZE2];
}
for (int i = 0; i < SIZE2; i++) {
for (int j = 0; j < SIZE2; j++) {
h_k2_in[i][j] = 1;
h_k2_out[i][j] = 0;
}
}
double clkbegin = rtclock();
serial(h_ser_in);
double clkend = rtclock();
double time = clkend - clkbegin; // seconds
cout << "Serial code on CPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / time)
<< " GFLOPS; Time = " << time * 1000 << " msec" << endl;
hipError_t status;
hipEvent_t start, end;
float k1_time; // ms
double* d_k1_in;
double* d_k1_out;
// TODO: Fill in
status = hipMalloc(&d_k1_in,SIZE1*SIZE1*sizeof(double));
if(status!=hipSuccess){
cout<<"Error in cuda malloc"<<endl;
}
double *tempinarr1 = new double[SIZE1*SIZE1];
for(int i=0;i<SIZE1*SIZE1;i++){
tempinarr1[i] = h_k1_in[(i/SIZE1)][(i%SIZE1)];
}
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_k1_in, tempinarr1, SIZE1*SIZE1*sizeof(double), hipMemcpyHostToDevice);
int threadPerBlock1 = 1024;
int numBlock1 = (int) ceil(((double)SIZE1)/((double)threadPerBlock1));
hipLaunchKernelGGL(( kernel1), dim3(numBlock1),dim3(threadPerBlock1), 0, 0, d_k1_in);
status = hipMemcpy(tempinarr1, d_k1_in, SIZE1*SIZE1*sizeof(double), hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&k1_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
for(int i=0;i<SIZE1*SIZE1;i++){
h_k1_in[(i/SIZE1)][(i%SIZE1)] = tempinarr1[i];
}
check_result(h_ser_in, h_k1_in, SIZE1);
cout << "Kernel 1 on GPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / (k1_time * 1.0e-3))
<< " GFLOPS; Time = " << k1_time << " msec" << endl;
double* d_k2_in;
double* d_k2_out;
// TODO: Fill in
status = hipMalloc(&d_k2_in,SIZE2*SIZE2*sizeof(double));
if(status!=hipSuccess){
cout<<"Error in cuda malloc"<<endl;
}
double *tempinarr2 = new double[SIZE2*SIZE2];
for(int i=0;i<SIZE2*SIZE2;i++){
tempinarr2[i] = h_k2_in[(i/SIZE2)][(i%SIZE2)];
}
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_k2_in, tempinarr2, SIZE2*SIZE2*sizeof(double), hipMemcpyHostToDevice);
int threadPerBlock2 = min(SIZE2,1024);
int numBlock2 = (int) ceil(((double)SIZE2)/((double)threadPerBlock2));
hipLaunchKernelGGL(( kernel2), dim3(numBlock2),dim3(threadPerBlock2), 0, 0, d_k2_in);
status = hipMemcpy(tempinarr2, d_k2_in, SIZE2*SIZE2*sizeof(double), hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&k1_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
for(int i=0;i<SIZE2*SIZE2;i++){
h_k2_in[(i/SIZE2)][(i%SIZE2)] = tempinarr2[i];
}
cout << "Kernel 2 on GPU: " << ((2.0 * SIZE2 * SIZE2 * ITER) / (k1_time * 1.0e-3))
<< " GFLOPS; Time = " << k1_time << " msec" << endl;
hipFree(d_k1_in);
hipFree(d_k2_in);
for (int i = 0; i < SIZE1; i++) {
delete[] h_ser_in[i];
delete[] h_ser_out[i];
delete[] h_k1_in[i];
delete[] h_k1_out[i];
}
delete[] h_ser_in;
delete[] h_ser_out;
delete[] h_k1_in;
delete[] h_k1_out;
for (int i = 0; i < SIZE2; i++) {
delete[] h_k2_in[i];
delete[] h_k2_out[i];
}
delete[] h_k2_in;
delete[] h_k2_out;
return EXIT_SUCCESS;
}
|
3e2b6dfa448db2575b45338bf9c59038ae9c1eee.cu
|
// Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p1.cu -o assignment5-p1
#include <cmath>
#include <cstdint>
#include <cuda.h>
#include <iostream>
#include <new>
#include <sys/time.h>
#define THRESHOLD (0.000001)
#define SIZE1 8192
#define SIZE2 (SIZE1+8)
#define ITER 100
using std::cerr;
using std::cout;
using std::endl;
__global__ void kernel1(double* d_k1_in) {
// TODO: Fill in
int indexcomb = blockDim.x * blockIdx.x + threadIdx.x;
int j = indexcomb;
if(j<(SIZE1-1)){
for(int k=0;k<ITER;k++){
for(int i=1;i<(SIZE1-1);i++){
double temp=0.0;
temp = d_k1_in[(i - 1)*SIZE1 + j + 1] + d_k1_in[i*SIZE1 + j+1];
temp += d_k1_in[(i+1)*SIZE1 + j+1];
d_k1_in[i*SIZE1 + j+1] = temp;
}
}
}
}
__global__ void kernel2(double* d_k2_in) {
// TODO: Fill in
int indexcomb = blockDim.x * blockIdx.x + threadIdx.x;
int j = indexcomb;
if(j<(SIZE2-1)){
for(int k=0;k<ITER;k++){
int i=1;
//loop unrolled over 4 ways
for(;i+3<(SIZE2-1);i+=4){
double temp=d_k2_in[(i - 1)*SIZE2 + j + 1] + d_k2_in[i*SIZE2 + j+1];
temp += d_k2_in[(i+1)*SIZE2 + j+1];
d_k2_in[i*SIZE2 + j+1] = temp;
temp=d_k2_in[(i)*SIZE2 + j + 1] + d_k2_in[(i+1)*SIZE2 + j+1];
temp += d_k2_in[(i+2)*SIZE2 + j+1];
d_k2_in[(i+1)*SIZE2 + j+1] = temp;
temp=d_k2_in[(i+1)*SIZE2 + j + 1] + d_k2_in[(i+2)*SIZE2 + j+1];
temp += d_k2_in[(i+3)*SIZE2 + j+1];
d_k2_in[(i+2)*SIZE2 + j+1] = temp;
temp=d_k2_in[(i+2)*SIZE2 + j + 1] + d_k2_in[(i+3)*SIZE2 + j+1];
temp += d_k2_in[(i+4)*SIZE2 + j+1];
d_k2_in[(i+3)*SIZE2 + j+1] = temp;
}
for(;i<(SIZE2-1);i++){
double temp=d_k2_in[(i - 1)*SIZE2 + j + 1] + d_k2_in[i*SIZE2 + j+1];
temp += d_k2_in[(i+1)*SIZE2 + j+1];
d_k2_in[i*SIZE2 + j+1] = temp;
}
}
}
}
__host__ void serial(double** h_ser_in) {
for (int k = 0; k < ITER; k++) {
for (int i = 1; i < (SIZE1 - 1); i++) {
for (int j = 0; j < (SIZE1 - 1); j++) {
h_ser_in[i][j + 1] =
(h_ser_in[i - 1][j + 1] + h_ser_in[i][j + 1] + h_ser_in[i + 1][j + 1]);
}
}
}
}
__host__ void check_result(double** w_ref, double** w_opt, uint64_t size) {
double maxdiff = 0.0, this_diff = 0.0;
int numdiffs = 0;
for (uint64_t i = 0; i < size; i++) {
for (uint64_t j = 0; j < size; j++) {
this_diff = w_ref[i][j] - w_opt[i][j];
if (fabs(this_diff) > THRESHOLD) {
numdiffs++;
if (this_diff > maxdiff)
maxdiff = this_diff;
}
}
}
if (numdiffs > 0) {
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff
<< endl;
} else {
cout << "No differences found between base and test versions\n";
}
}
__host__ double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
int main() {
double** h_ser_in = new double*[SIZE1];
double** h_ser_out = new double*[SIZE1];
double** h_k1_in = new double*[SIZE1];
double** h_k1_out = new double*[SIZE1];
for (int i = 0; i < SIZE1; i++) {
h_ser_in[i] = new double[SIZE1];
h_ser_out[i] = new double[SIZE1];
h_k1_in[i] = new double[SIZE1];
h_k1_out[i] = new double[SIZE1];
}
for (int i = 0; i < SIZE1; i++) {
for (int j = 0; j < SIZE1; j++) {
h_ser_in[i][j] = 1;
h_ser_out[i][j] = 0;
h_k1_in[i][j] = 1;
h_k1_out[i][j] = 0;
}
}
double** h_k2_in = new double*[SIZE2];
double** h_k2_out = new double*[SIZE2];
for (int i = 0; i < SIZE2; i++) {
h_k2_in[i] = new double[SIZE2];
h_k2_out[i] = new double[SIZE2];
}
for (int i = 0; i < SIZE2; i++) {
for (int j = 0; j < SIZE2; j++) {
h_k2_in[i][j] = 1;
h_k2_out[i][j] = 0;
}
}
double clkbegin = rtclock();
serial(h_ser_in);
double clkend = rtclock();
double time = clkend - clkbegin; // seconds
cout << "Serial code on CPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / time)
<< " GFLOPS; Time = " << time * 1000 << " msec" << endl;
cudaError_t status;
cudaEvent_t start, end;
float k1_time; // ms
double* d_k1_in;
double* d_k1_out;
// TODO: Fill in
status = cudaMalloc(&d_k1_in,SIZE1*SIZE1*sizeof(double));
if(status!=cudaSuccess){
cout<<"Error in cuda malloc"<<endl;
}
double *tempinarr1 = new double[SIZE1*SIZE1];
for(int i=0;i<SIZE1*SIZE1;i++){
tempinarr1[i] = h_k1_in[(i/SIZE1)][(i%SIZE1)];
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_k1_in, tempinarr1, SIZE1*SIZE1*sizeof(double), cudaMemcpyHostToDevice);
int threadPerBlock1 = 1024;
int numBlock1 = (int) ceil(((double)SIZE1)/((double)threadPerBlock1));
kernel1<<<numBlock1,threadPerBlock1>>>(d_k1_in);
status = cudaMemcpy(tempinarr1, d_k1_in, SIZE1*SIZE1*sizeof(double), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&k1_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
for(int i=0;i<SIZE1*SIZE1;i++){
h_k1_in[(i/SIZE1)][(i%SIZE1)] = tempinarr1[i];
}
check_result(h_ser_in, h_k1_in, SIZE1);
cout << "Kernel 1 on GPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / (k1_time * 1.0e-3))
<< " GFLOPS; Time = " << k1_time << " msec" << endl;
double* d_k2_in;
double* d_k2_out;
// TODO: Fill in
status = cudaMalloc(&d_k2_in,SIZE2*SIZE2*sizeof(double));
if(status!=cudaSuccess){
cout<<"Error in cuda malloc"<<endl;
}
double *tempinarr2 = new double[SIZE2*SIZE2];
for(int i=0;i<SIZE2*SIZE2;i++){
tempinarr2[i] = h_k2_in[(i/SIZE2)][(i%SIZE2)];
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_k2_in, tempinarr2, SIZE2*SIZE2*sizeof(double), cudaMemcpyHostToDevice);
int threadPerBlock2 = min(SIZE2,1024);
int numBlock2 = (int) ceil(((double)SIZE2)/((double)threadPerBlock2));
kernel2<<<numBlock2,threadPerBlock2>>>(d_k2_in);
status = cudaMemcpy(tempinarr2, d_k2_in, SIZE2*SIZE2*sizeof(double), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&k1_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
for(int i=0;i<SIZE2*SIZE2;i++){
h_k2_in[(i/SIZE2)][(i%SIZE2)] = tempinarr2[i];
}
cout << "Kernel 2 on GPU: " << ((2.0 * SIZE2 * SIZE2 * ITER) / (k1_time * 1.0e-3))
<< " GFLOPS; Time = " << k1_time << " msec" << endl;
cudaFree(d_k1_in);
cudaFree(d_k2_in);
for (int i = 0; i < SIZE1; i++) {
delete[] h_ser_in[i];
delete[] h_ser_out[i];
delete[] h_k1_in[i];
delete[] h_k1_out[i];
}
delete[] h_ser_in;
delete[] h_ser_out;
delete[] h_k1_in;
delete[] h_k1_out;
for (int i = 0; i < SIZE2; i++) {
delete[] h_k2_in[i];
delete[] h_k2_out[i];
}
delete[] h_k2_in;
delete[] h_k2_out;
return EXIT_SUCCESS;
}
|
50ce534c4faad5102c6b87651e798be39dfc8d81.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file vn_app.cu
*
* @brief single-source shortest path (vn) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/vn/vn_enactor.cuh>
#include <gunrock/app/vn/vn_test.cuh>
namespace gunrock {
namespace app {
namespace vn {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-IDs|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"srcs-per-run",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"number of source nodes per run", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run vn tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
 * @param[in] target Where to perform the vn computation (host or device)
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT,
typename VertexT = typename GraphT::VertexT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
ValueT **ref_distances = NULL,
util::Location target = util::DEVICE) {
hipError_t retval = hipSuccess;
// typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// parse configurations from parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool mark_pred = parameters.Get<bool>("mark-pred");
std::string validation = parameters.Get<std::string>("validation");
// Load srcs
std::vector<VertexT> srcs_vector =
parameters.Get<std::vector<VertexT>>("srcs");
int total_num_srcs = srcs_vector.size();
int num_runs = parameters.Get<int>("num-runs");
int srcs_per_run = parameters.Get<int>("srcs-per-run");
if (srcs_per_run == util::PreDefinedValues<int>::InvalidValue) {
srcs_per_run = total_num_srcs;
}
assert(total_num_srcs == num_runs * srcs_per_run);
VertexT *all_srcs = &srcs_vector[0];
util::Info info("vn", parameters, graph); // initialize Info structure
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_distances = new ValueT[graph.nodes];
VertexT *h_preds = (mark_pred) ? new VertexT[graph.nodes] : NULL;
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// info.preprocess_time = cpu_timer.ElapsedMillis();
VertexT *srcs = new VertexT[srcs_per_run];
for (int run_num = 0; run_num < num_runs; ++run_num) {
for (SizeT i = 0; i < srcs_per_run; ++i) {
srcs[i] = all_srcs[run_num * srcs_per_run + i % total_num_srcs];
}
GUARD_CU(problem.Reset(srcs, srcs_per_run, target));
GUARD_CU(enactor.Reset(srcs, srcs_per_run, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact(srcs));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
std::string src_msg = "";
for (SizeT i = 0; i < srcs_per_run; ++i) {
src_msg += std::to_string(srcs[i]);
if (i != srcs_per_run - 1) src_msg += ",";
}
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
" ms, srcs = " + src_msg + ", #iterations = " // TODO -- fix docs
+ std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_distances, h_preds));
SizeT num_errors = app::vn::Validate_Results(
parameters, graph, srcs, h_distances, h_preds,
ref_distances == NULL ? NULL : ref_distances[run_num], (VertexT*)NULL, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_distances, h_preds));
if (validation == "last") {
SizeT num_errors = app::vn::Validate_Results(
parameters, graph, srcs, h_distances, h_preds,
ref_distances == NULL ? NULL : ref_distances[num_runs - 1]);
}
// compute running statistics
info.ComputeTraversalStats(enactor, h_distances);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_distances;
h_distances = NULL;
delete[] h_preds;
h_preds = NULL;
delete[] all_srcs;
all_srcs = NULL;
delete[] srcs;
srcs = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace vn
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_vn function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_vn(gunrock::util::Parameters ¶meters, GraphT &graph,
ValueT *distances, typename GraphT::VertexT *preds = NULL) {
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef gunrock::app::vn::Problem<GraphT> ProblemT;
typedef gunrock::app::vn::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
std::vector<VertexT> srcs_vector =
parameters.Get<std::vector<VertexT>>("srcs");
SizeT num_srcs = srcs_vector.size();
VertexT *srcs = new VertexT[num_srcs];
for (SizeT i = 0; i < num_srcs; ++i) {
srcs[i] = srcs_vector[i];
}
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(srcs, num_srcs, target);
enactor.Reset(srcs, num_srcs, target);
cpu_timer.Start();
enactor.Enact(srcs);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(distances, preds == NULL ? NULL : preds);
}
enactor.Release(target);
problem.Release(target);
srcs_vector.clear();
return total_time;
}
// * @brief Simple interface that takes in a graph in CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform vn
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int, typename vnValueT = GValueT>
double vn(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const GValueT *edge_values, const int num_runs, VertexT *sources,
const bool mark_pred, vnValueT *distances, VertexT *preds = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES |
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("vn");
gunrock::graphio::UseParameters(parameters);
gunrock::app::vn::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("mark-pred", mark_pred);
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
gunrock::util::HOST);
// graph.CsrT::edge_values .SetPointer(edge_values, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the vn
double elapsed_time = gunrock_vn(parameters, graph, distances, preds);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
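// Usage sketch (editor's addition): a hedged example of calling the simple CSR
// interface above on a tiny undirected path graph 0-1-2, using the default
// template types VertexT = int, SizeT = int, GValueT = unsigned int. The graph
// data and the single source below are illustrative only.
//
//   int row_offsets[]          = { 0, 1, 3, 4 };   // CSR row pointers (3 vertices)
//   int col_indices[]          = { 1, 0, 2, 1 };   // edges 0-1, 1-0, 1-2, 2-1
//   unsigned int edge_values[] = { 1, 1, 1, 1 };
//   int sources[]              = { 0 };            // one run, rooted at vertex 0
//   unsigned int distances[3];
//   int preds[3];
//   double elapsed = vn(3, 4, row_offsets, col_indices, edge_values,
//                       1, sources, true, distances, preds);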
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
50ce534c4faad5102c6b87651e798be39dfc8d81.cu
|
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file vn_app.cu
*
* @brief single-source shortest path (vn) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/vn/vn_enactor.cuh>
#include <gunrock/app/vn/vn_test.cuh>
namespace gunrock {
namespace app {
namespace vn {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-IDs|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"srcs-per-run",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"number of source nodes per run", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run vn tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
 * @param[in] target Where to perform the vn computation (host or device)
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT,
typename VertexT = typename GraphT::VertexT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
ValueT **ref_distances = NULL,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
// typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// parse configurations from parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool mark_pred = parameters.Get<bool>("mark-pred");
std::string validation = parameters.Get<std::string>("validation");
// Load srcs
std::vector<VertexT> srcs_vector =
parameters.Get<std::vector<VertexT>>("srcs");
int total_num_srcs = srcs_vector.size();
int num_runs = parameters.Get<int>("num-runs");
int srcs_per_run = parameters.Get<int>("srcs-per-run");
if (srcs_per_run == util::PreDefinedValues<int>::InvalidValue) {
srcs_per_run = total_num_srcs;
}
assert(total_num_srcs == num_runs * srcs_per_run);
VertexT *all_srcs = &srcs_vector[0];
util::Info info("vn", parameters, graph); // initialize Info structure
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_distances = new ValueT[graph.nodes];
VertexT *h_preds = (mark_pred) ? new VertexT[graph.nodes] : NULL;
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// info.preprocess_time = cpu_timer.ElapsedMillis();
VertexT *srcs = new VertexT[srcs_per_run];
for (int run_num = 0; run_num < num_runs; ++run_num) {
for (SizeT i = 0; i < srcs_per_run; ++i) {
srcs[i] = all_srcs[run_num * srcs_per_run + i % total_num_srcs];
}
GUARD_CU(problem.Reset(srcs, srcs_per_run, target));
GUARD_CU(enactor.Reset(srcs, srcs_per_run, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact(srcs));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
std::string src_msg = "";
for (SizeT i = 0; i < srcs_per_run; ++i) {
src_msg += std::to_string(srcs[i]);
if (i != srcs_per_run - 1) src_msg += ",";
}
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
" ms, srcs = " + src_msg + ", #iterations = " // TODO -- fix docs
+ std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_distances, h_preds));
SizeT num_errors = app::vn::Validate_Results(
parameters, graph, srcs, h_distances, h_preds,
ref_distances == NULL ? NULL : ref_distances[run_num], (VertexT*)NULL, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_distances, h_preds));
if (validation == "last") {
SizeT num_errors = app::vn::Validate_Results(
parameters, graph, srcs, h_distances, h_preds,
ref_distances == NULL ? NULL : ref_distances[num_runs - 1]);
}
// compute running statistics
info.ComputeTraversalStats(enactor, h_distances);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_distances;
h_distances = NULL;
delete[] h_preds;
h_preds = NULL;
delete[] all_srcs;
all_srcs = NULL;
delete[] srcs;
srcs = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace vn
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_vn function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_vn(gunrock::util::Parameters ¶meters, GraphT &graph,
ValueT *distances, typename GraphT::VertexT *preds = NULL) {
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef gunrock::app::vn::Problem<GraphT> ProblemT;
typedef gunrock::app::vn::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
std::vector<VertexT> srcs_vector =
parameters.Get<std::vector<VertexT>>("srcs");
SizeT num_srcs = srcs_vector.size();
VertexT *srcs = new VertexT[num_srcs];
for (SizeT i = 0; i < num_srcs; ++i) {
srcs[i] = srcs_vector[i];
}
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(srcs, num_srcs, target);
enactor.Reset(srcs, num_srcs, target);
cpu_timer.Start();
enactor.Enact(srcs);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(distances, preds == NULL ? NULL : preds);
}
enactor.Release(target);
problem.Release(target);
srcs_vector.clear();
return total_time;
}
// * @brief Simple interface that takes the graph in CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform vn
// * @param[in] sources Sources to begin traversal from, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int, typename vnValueT = GValueT>
double vn(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const GValueT *edge_values, const int num_runs, VertexT *sources,
const bool mark_pred, vnValueT *distances, VertexT *preds = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES |
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("vn");
gunrock::graphio::UseParameters(parameters);
gunrock::app::vn::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("mark-pred", mark_pred);
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
gunrock::util::HOST);
// graph.CsrT::edge_values .SetPointer(edge_values, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the vn
double elapsed_time = gunrock_vn(parameters, graph, distances, preds);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
6630d355b008e379b2003894ea1f0d4196a7651e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bc.cu
*
* @brief Simple test driver program for BC.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <queue>
#include <iostream>
#include <fstream>
#include <algorithm>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// BC includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_problem.cuh>
#include <gunrock/app/bc/bc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// Boost includes
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
#include <boost/graph/bc_clustering.hpp>
#include <boost/graph/iteration_macros.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bc;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"test <graph-type> [graph-type-arguments]\n"
"Graph type and graph type arguments:\n"
" market <matrix-market-file-name>\n"
" Reads a Matrix-Market coordinate-formatted graph of\n"
" directed/undirected edges from STDIN (or from the\n"
" optionally-specified file).\n"
" rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
" Generate R-MAT graph as input\n"
" --rmat_scale=<vertex-scale>\n"
" --rmat_nodes=<number-nodes>\n"
" --rmat_edgefactor=<edge-factor>\n"
" --rmat_edges=<number-edges>\n"
" --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
" --rmat_seed=<seed>\n"
" rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n"
" Generate Random Geometry Graph as input\n"
" --rgg_scale=<vertex-scale>\n"
" --rgg_nodes=<number-nodes>\n"
" --rgg_thfactor=<threshold-factor>\n"
" --rgg_threshold=<threshold>\n"
" --rgg_vmultipiler=<vmultipiler>\n"
" --rgg_seed=<seed>\n\n"
"Optional arguments:\n"
"[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
"[--instrumented] Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty.\n"
" (a relative indicator of load imbalance.)\n"
"[--src=<Vertex-ID|randomize|largestdegree>]\n"
" Begins traversal from the source (Default: 0).\n"
" If randomize: from a random source vertex.\n"
" If largestdegree: from largest degree vertex.\n"
"[--quick] Skip the CPU reference validation process.\n"
"[--mark-pred] Keep both label info and predecessor info.\n"
"[--disable-size-check] Disable frontier queue size check.\n"
"[--grid-size=<grid size>] Maximum allowed grid size setting.\n"
"[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--in-sizing=<in/out_queue_scale_factor>]\n"
" Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--v] Print verbose per iteration debug info.\n"
"[--iteration-num=<num>] Number of runs to perform the test.\n"
"[--traversal-mode=<0|1>] Set traversal strategy, 0 for Load-Balanced\n"
" 1 for Dynamic-Cooperative (Default: dynamic\n"
" determine based on average degree).\n"
"[--partition_method=<random|biasrandom|clustered|metis>]\n"
" Choose partitioner (Default use random).\n"
"[--ref-file=<file_name>] Use pre-computed result in file to verify.\n"
"[--quiet] No output (unless --json is specified).\n"
"[--json] Output JSON-format statistics to STDOUT.\n"
"[--jsonfile=<name>] Output JSON-format statistics to file <name>\n"
"[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n"
" where name is auto-generated.\n"
);
}
/**
* @brief Displays the BC result (sigma value and BC value)
*
* @tparam Value
* @tparam SizeT
*
* @param[in] sigmas
* @param[in] bc_values
* @param[in] nodes
* @param[in] quiet
*/
template <
typename SizeT,
typename Value>
void DisplaySolution(
Value *sigmas,
Value *bc_values,
SizeT nodes,
bool quiet = false)
{
if (quiet) return;
if (nodes < 40)
{
printf("[");
for (SizeT i = 0; i < nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(sigmas[i]);
printf(",");
PrintValue(bc_values[i]);
printf(" ");
}
printf("]\n");
}
}
/******************************************************************************
* BC Testing Routines
*****************************************************************************/
/**
* @brief Graph edge properties (bundled properties)
*/
struct EdgeProperties
{
int weight;
};
/**
* @brief A simple CPU-based reference BC ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to graph we process on
* @param[in] bc_values Pointer to node bc value
* @param[in] sigmas Pointer to node sigma value
* @param[in] source_path Pointer to a vector to store CPU computed labels for each node
* @param[in] src VertexId of source node if there is any
* @param[in] quiet Don't print out anything to stdout
*/
template <
typename VertexId,
typename SizeT,
typename Value >
void ReferenceBC(
const Csr<VertexId, SizeT, Value> &graph,
Value *bc_values,
Value *sigmas,
VertexId *source_path,
VertexId src,
bool quiet = false)
{
typedef Coo<VertexId, Value> EdgeTupleType;
EdgeTupleType *coo = (EdgeTupleType*) malloc(sizeof(EdgeTupleType) * graph.edges);
if (src == -1)
{
// Perform full exact BC using BGL
using namespace boost;
typedef adjacency_list <setS, vecS, undirectedS, no_property,
EdgeProperties> Graph;
typedef Graph::vertex_descriptor Vertex;
typedef Graph::edge_descriptor Edge;
Graph G;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j)
{
add_edge(vertex(i, G), vertex(graph.column_indices[j], G), G);
}
}
typedef std::map<Edge, int> StdEdgeIndexMap;
StdEdgeIndexMap my_e_index;
typedef boost::associative_property_map< StdEdgeIndexMap > EdgeIndexMap;
EdgeIndexMap e_index(my_e_index);
int i = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
my_e_index.insert(std::pair<Edge, int>(edge, i));
++i;
}
// Define EdgeCentralityMap
std::vector< double > e_centrality_vec(boost::num_edges(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
EdgeIndexMap >
e_centrality_map(e_centrality_vec.begin(), e_index);
// Define VertexCentralityMap
typedef boost::property_map< Graph, boost::vertex_index_t>::type
VertexIndexMap;
VertexIndexMap v_index = get(boost::vertex_index, G);
std::vector< double > v_centrality_vec(boost::num_vertices(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
VertexIndexMap>
v_centrality_map(v_centrality_vec.begin(), v_index);
// Perform BC
CpuTimer cpu_timer;
cpu_timer.Start();
brandes_betweenness_centrality(G, v_centrality_map, e_centrality_map);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
BGL_FORALL_VERTICES(vertex, G, Graph)
{
bc_values[vertex] = (Value)v_centrality_map[vertex];
}
int idx = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
coo[idx].row = source(edge, G);
coo[idx].col = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
coo[idx].col = source(edge, G);
coo[idx].row = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
}
std::stable_sort(coo, coo + graph.edges,
RowFirstTupleCompare<EdgeTupleType>);
if (!quiet)
{
printf("CPU BC finished in %lf msec.", elapsed);
}
}
else
{
// Simple BFS pass to get single pass BC
// VertexId *source_path = new VertexId[graph.nodes];
// Initialize distances
for (VertexId i = 0; i < graph.nodes; ++i)
{
source_path[i] = -1;
bc_values[i] = 0;
sigmas[i] = 0;
}
source_path[src] = 0;
VertexId search_depth = 0;
sigmas[src] = 1;
// Initialize queue for managing previously-discovered nodes
std::deque<VertexId> frontier;
frontier.push_back(src);
//
// Perform one pass of BFS for one source
//
CpuTimer cpu_timer;
cpu_timer.Start();
while (!frontier.empty())
{
// Dequeue node from frontier
VertexId dequeued_node = frontier.front();
frontier.pop_front();
VertexId neighbor_dist = source_path[dequeued_node] + 1;
// Locate adjacency list
int edges_begin = graph.row_offsets[dequeued_node];
int edges_end = graph.row_offsets[dequeued_node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge)
{
// Lookup neighbor and enqueue if undiscovered
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == -1)
{
source_path[neighbor] = neighbor_dist;
sigmas[neighbor] += sigmas[dequeued_node];
if (search_depth < neighbor_dist)
{
search_depth = neighbor_dist;
}
frontier.push_back(neighbor);
}
else
{
if (source_path[neighbor] == source_path[dequeued_node] + 1)
sigmas[neighbor] += sigmas[dequeued_node];
}
}
}
search_depth++;
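        // Backward phase of Brandes' algorithm: sweep the BFS levels from the
        // deepest back to level 1, accumulating each node's dependency from
        // its successors on the next-deeper level.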
for (int iter = search_depth - 2; iter > 0; --iter)
{
int cur_level = 0;
for (int node = 0; node < graph.nodes; ++node)
{
if (source_path[node] == iter)
{
++cur_level;
int edges_begin = graph.row_offsets[node];
int edges_end = graph.row_offsets[node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge)
{
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == iter + 1)
{
bc_values[node] +=
1.0f * sigmas[node] / sigmas[neighbor] *
(1.0f + bc_values[neighbor]);
}
}
}
}
}
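        // Halve the accumulated values to match the undirected-graph BC
        // convention; the GPU side applies the same 0.5 scaling.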
for (int i = 0; i < graph.nodes; ++i)
{
bc_values[i] *= 0.5f;
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
if (!quiet)
{
printf("CPU BC finished in %lf msec. Search depth: %d\n",
elapsed, search_depth);
}
//delete[] source_path;
}
free(coo);
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] info Pointer to info contains parameters and statistics.
*
* \return hipError_t object which indicates the success of
* all CUDA function calls.
*/
template <
typename VertexId,
typename SizeT,
typename Value>
hipError_t RunTests(Info<VertexId, SizeT, Value> *info)
{
typedef BCProblem < VertexId,
SizeT,
Value,
true> // MARK_PREDECESSORS
Problem; //does not use double buffer
typedef BCEnactor < Problem>
//INSTRUMENT,
//DEBUG,
//SIZE_CHECK >
Enactor;
// parse configurations from mObject info
Csr<VertexId, SizeT, Value> *graph = info->csr_ptr;
VertexId src = info->info["source_vertex" ].get_int64();
bool quiet_mode = info->info["quiet_mode" ].get_bool();
int max_grid_size = info->info["max_grid_size" ].get_int();
int num_gpus = info->info["num_gpus" ].get_int();
double max_queue_sizing = info->info["max_queue_sizing" ].get_real();
double max_queue_sizing1 = info->info["max_queue_sizing1" ].get_real();
double max_in_sizing = info->info["max_in_sizing" ].get_real();
std::string partition_method = info->info["partition_method" ].get_str();
double partition_factor = info->info["partition_factor" ].get_real();
int partition_seed = info->info["partition_seed" ].get_int();
bool quick_mode = info->info["quick_mode" ].get_bool();
bool stream_from_host = info->info["stream_from_host" ].get_bool();
bool instrument = info->info["instrument" ].get_bool ();
bool debug = info->info["debug_mode" ].get_bool ();
bool size_check = info->info["size_check" ].get_bool ();
int iterations = info->info["num_iteration" ].get_int();
std::string src_type = info->info["source_type" ].get_str ();
int src_seed = info->info["source_seed" ].get_int ();
std::string ref_filename = info->info["ref_filename" ].get_str();
int communicate_latency = info->info["communicate_latency"].get_int ();
float communicate_multipy = info->info["communicate_multipy"].get_real();
int expand_latency = info->info["expand_latency" ].get_int ();
int subqueue_latency = info->info["subqueue_latency" ].get_int ();
int fullqueue_latency = info->info["fullqueue_latency" ].get_int ();
int makeout_latency = info->info["makeout_latency" ].get_int ();
std::string traversal_mode = info->info["traversal_mode" ].get_str ();
if (traversal_mode == "TWC") traversal_mode = "LB";
if (max_queue_sizing < 0) max_queue_sizing = 1.2;
if (max_in_sizing < 0) max_in_sizing = 1.1;
if (communicate_multipy > 1) max_in_sizing *= communicate_multipy;
CpuTimer cpu_timer;
hipError_t retval = hipSuccess;
cpu_timer.Start();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
hipStream_t *streams = (hipStream_t*)info->streams;
// Allocate host-side array (for both reference and GPU-computed results)
Value *reference_bc_values = new Value [graph->nodes];
Value *reference_sigmas = new Value [graph->nodes];
VertexId *reference_labels = new VertexId[graph->nodes];
Value *h_sigmas = new Value [graph->nodes];
Value *h_bc_values = new Value [graph->nodes];
VertexId *h_labels = new VertexId[graph->nodes];
Value *reference_check_bc_values = (quick_mode) ? NULL : reference_bc_values;
Value *reference_check_sigmas = (quick_mode || (src == -1)) ? NULL : reference_sigmas;
VertexId *reference_check_labels = (quick_mode || (src == -1)) ? NULL : reference_labels;
size_t *org_size = new size_t[num_gpus];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
if (retval = util::SetDevice(gpu_idx[gpu])) return retval;
hipMemGetInfo(&(org_size[gpu]), &dummy);
}
Problem* problem = new Problem(false); // allocate problem on GPU
if (retval = util::GRError(problem->Init(
stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"BC Problem Initialization Failed", __FILE__, __LINE__))
return retval;
Enactor* enactor = new Enactor(
num_gpus, gpu_idx, instrument, debug, size_check); // enactor map
if (retval = util::GRError(enactor->Init(
context, problem, max_grid_size, traversal_mode),
"BC Enactor init failed", __FILE__, __LINE__))
return retval;
enactor -> communicate_latency = communicate_latency;
enactor -> communicate_multipy = communicate_multipy;
enactor -> expand_latency = expand_latency;
enactor -> subqueue_latency = subqueue_latency;
enactor -> fullqueue_latency = fullqueue_latency;
enactor -> makeout_latency = makeout_latency;
if (retval = util::SetDevice(gpu_idx[0])) return retval;
if (retval = util::latency::Test(
streams[0], problem -> data_slices[0] -> latency_data,
communicate_latency,
communicate_multipy,
expand_latency,
subqueue_latency,
fullqueue_latency,
makeout_latency)) return retval;
cpu_timer.Stop();
info -> info["preprocess_time"] = cpu_timer.ElapsedMillis();
// perform BC
double total_elapsed = 0.0;
double single_elapsed = 0.0;
double max_elapsed = 0.0;
double min_elapsed = 1e10;
json_spirit::mArray process_times;
VertexId start_src, end_src;
if (src_type == "random2")
{
if (src_seed == -1) src_seed = time(NULL);
if (!quiet_mode)
printf("src_seed = %d\n", src_seed);
srand(src_seed);
}
if (!quiet_mode)
printf("Using traversal-mode %s\n", traversal_mode.c_str());
for (int iter = 0; iter < iterations; ++iter)
{
//if (!quiet_mode)
//{
// printf("iteration:%d\n", iter);
//}
if (src_type == "random2")
{
bool src_valid = false;
while (!src_valid)
{
src = rand() % graph -> nodes;
if (graph -> row_offsets[src] != graph -> row_offsets[src+1])
src_valid = true;
}
}
if (src == -1)
{
start_src = 0;
end_src = graph->nodes;
}
else
{
start_src = src;
end_src = src + 1;
}
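        // Zero the per-vertex BC accumulators on every GPU before this iteration.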
for (int gpu = 0; gpu < num_gpus; gpu++)
{
if (retval = util::SetDevice(gpu_idx[gpu])) return retval;
hipLaunchKernelGGL(( util::MemsetKernel) , dim3(128), dim3(128), 0, 0,
problem -> data_slices[gpu] -> bc_values.GetPointer(util::DEVICE),
(Value)0.0, problem->sub_graphs[gpu].nodes);
}
if (retval = util::GRError(problem->Reset(
0, enactor->GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"BC Problem Data Reset Failed", __FILE__, __LINE__))
return retval;
if (!quiet_mode)
{
printf("__________________________\n"); fflush(stdout);
}
single_elapsed = 0;
for (VertexId i = start_src; i < end_src; ++i)
{
if (retval = util::GRError(problem->Reset(
i, enactor->GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"BC Problem Data Reset Failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor ->Reset(),
"BC Enactor Reset failed", __FILE__, __LINE__))
return retval;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
if (retval = util::SetDevice(gpu_idx[gpu]))
return retval;
if (retval = util::GRError(hipDeviceSynchronize(),
"hipDeviceSynchronize failed", __FILE__, __LINE__))
return retval;
}
cpu_timer.Start();
if (retval = util::GRError(enactor ->Enact(i, traversal_mode),
"BC Problem Enact Failed", __FILE__, __LINE__))
return retval;
cpu_timer.Stop();
single_elapsed += cpu_timer.ElapsedMillis();
}
total_elapsed += single_elapsed;
process_times.push_back(single_elapsed);
if (single_elapsed > max_elapsed) max_elapsed = single_elapsed;
if (single_elapsed < min_elapsed) min_elapsed = single_elapsed;
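        // Apply the 0.5 undirected-graph scaling to the BC values accumulated on each GPU.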
for (int gpu = 0; gpu < num_gpus; gpu++)
{
if (retval = util::SetDevice(gpu_idx[gpu])) return retval;
hipLaunchKernelGGL(( util::MemsetScaleKernel) , dim3(128), dim3(128), 0, 0,
problem -> data_slices[gpu] -> bc_values.GetPointer(util::DEVICE),
(Value)0.5, problem -> sub_graphs[gpu].nodes);
}
if (!quiet_mode)
{
printf("--------------------------\n"
"iteration %d elapsed: %lf ms, src = %lld\n",
iter, single_elapsed, (long long)src);
fflush(stdout);
}
}
total_elapsed /= iterations;
info -> info["process_times"] = process_times;
info -> info["min_process_time"] = min_elapsed;
info -> info["max_process_time"] = max_elapsed;
// compute reference CPU BC solution for source-distance
if (!quick_mode)
{
if (ref_filename.empty())
{
if (!quiet_mode) { printf("Computing reference value ...\n"); }
ReferenceBC(
*graph,
reference_check_bc_values,
reference_check_sigmas,
reference_check_labels,
src,
quiet_mode);
if (!quiet_mode) { printf("\n"); }
}
else
{
std::ifstream fin;
fin.open(ref_filename.c_str(), std::ios::binary);
for (int i = 0; i < graph->nodes; ++i)
{
fin.read(reinterpret_cast<char*>(&reference_check_bc_values[i]), sizeof(Value));
}
fin.close();
}
}
cpu_timer.Start();
// Copy out results
if (retval = util::GRError(problem -> Extract(
h_sigmas, h_bc_values, h_labels),
"BC Problem Data Extraction Failed", __FILE__, __LINE__))
return retval;
// Verify the result
if (!quick_mode)
{
//util::cpu_mt::PrintCPUArray<SizeT, Value>("reference_check_bc_values", reference_check_bc_values, graph->nodes);
//util::cpu_mt::PrintCPUArray<SizeT, Value>("bc_values", h_bc_values, graph->nodes);
if (!quiet_mode) { printf("Validity BC Value: "); }
int num_error = CompareResults(
h_bc_values, reference_check_bc_values,
graph->nodes, true, quiet_mode);
if (num_error > 0)
{
if (!quiet_mode) { printf("Number of errors occurred: %d\n", num_error); }
}
if (!quiet_mode) { printf("\n"); }
}
if (reference_check_sigmas != NULL)
{
if (!quiet_mode) { printf("Validity Sigma: "); }
int num_error = CompareResults(
h_sigmas, reference_check_sigmas,
graph->nodes, true, quiet_mode);
if (num_error > 0)
{
if (!quiet_mode)
{
printf("Number of errors occurred: %d\n", num_error);
}
}
if (!quiet_mode) { printf("\n"); }
}
if (reference_check_labels != NULL)
{
if (!quiet_mode) { printf("Validity labels: "); }
int num_error = CompareResults(
h_labels, reference_check_labels,
graph->nodes, true, quiet_mode);
if (num_error > 0)
{
if (!quiet_mode)
{
printf("Number of errors occurred: %d\n", num_error);
}
}
if (!quiet_mode) { printf("\n"); }
}
if (!quiet_mode)
{
// Display Solution
DisplaySolution(h_sigmas, h_bc_values, graph->nodes);
}
info->ComputeTraversalStats( // compute running statistics
enactor->enactor_stats.GetPointer(), total_elapsed, h_labels);
if (!quiet_mode)
{
printf("\n\tMemory Usage(B)\t");
        for (int gpu = 0; gpu < num_gpus; gpu++)
        {
            if (num_gpus > 1 && gpu != 0)
                printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu);
            else
                printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
        }
if (num_gpus > 1) printf(" #keys%d", num_gpus);
printf("\n");
double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t gpu_free, dummy;
hipSetDevice(gpu_idx[gpu]);
hipMemGetInfo(&gpu_free, &dummy);
printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free);
for (int i = 0; i < num_gpus; i++)
{
for (int j = 0; j < 2; j++)
{
SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / (num_gpus > 1 ? problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes);
if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor;
}
if (num_gpus > 1 && i != 0 )
for (int t = 0; t < 2; t++)
{
SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i];
if (factor > max_in_sizing_) max_in_sizing_ = factor;
}
}
if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize()));
printf("\n");
}
printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]);
if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_);
printf("\n");
}
// Cleanup
if (org_size ) {delete[] org_size ; org_size = NULL;}
if (problem ) {delete problem ; problem = NULL;}
if (enactor ) {delete enactor ; enactor = NULL;}
if (reference_sigmas ) {delete[] reference_sigmas ; reference_sigmas = NULL;}
if (reference_bc_values ) {delete[] reference_bc_values ; reference_bc_values = NULL;}
if (reference_labels ) {delete[] reference_labels ; reference_labels = NULL;}
if (h_sigmas ) {delete[] h_sigmas ; h_sigmas = NULL;}
if (h_bc_values ) {delete[] h_bc_values ; h_bc_values = NULL;}
if (h_labels ) {delete[] h_labels ; h_labels = NULL;}
cpu_timer.Stop();
info -> info["postprocess_time"] = cpu_timer.ElapsedMillis();
return retval;
}
/******************************************************************************
* Main
******************************************************************************/
template <
typename VertexId,
typename SizeT,
typename Value>
int main_(CommandLineArgs* args)
{
CpuTimer cpu_timer, cpu_timer2;
cpu_timer.Start();
Csr <VertexId, SizeT, Value> csr(false); // graph we process on
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
// graph construction or generation related parameters
info -> info["undirected"] = true; // require undirected input graph
cpu_timer2.Start();
info -> Init("BC", *args, csr); // initialize Info structure
cpu_timer2.Stop();
info -> info["load_time"] = cpu_timer2.ElapsedMillis();
RunTests<VertexId, SizeT, Value>(info); // run test
cpu_timer.Stop();
info->info["total_time"] = cpu_timer.ElapsedMillis();
if (!(info->info["quiet_mode"].get_bool()))
{
info->DisplayStats(); // display collected statistics
}
info->CollectInfo(); // collected all the info and put into JSON mObject
return 0;
}
template <
typename VertexId, // the vertex identifier type, usually int or long long
    typename SizeT > // the size type, usually int or long long
int main_Value(CommandLineArgs *args)
{
// disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-Value"))
// return main_<VertexId, SizeT, double>(args);
// else
return main_<VertexId, SizeT, float >(args);
}
template <
typename VertexId>
int main_SizeT(CommandLineArgs *args)
{
// disabled to reduce compile time
if (args -> CheckCmdLineFlag("64bit-SizeT"))
return main_Value<VertexId, long long>(args);
else
return main_Value<VertexId, int >(args);
}
int main_VertexId(CommandLineArgs *args)
{
// disabled, because of filter smem size issue
//if (args -> CheckCmdLineFlag("64bit-VertexId"))
// return main_SizeT<long long>(args);
//else
return main_SizeT<int >(args);
}
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
{
Usage();
return 1;
}
return main_VertexId(&args);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
6630d355b008e379b2003894ea1f0d4196a7651e.cu
|
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bc.cu
*
* @brief Simple test driver program for BC.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <queue>
#include <iostream>
#include <fstream>
#include <algorithm>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// BC includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_problem.cuh>
#include <gunrock/app/bc/bc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// Boost includes
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
#include <boost/graph/bc_clustering.hpp>
#include <boost/graph/iteration_macros.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bc;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"test <graph-type> [graph-type-arguments]\n"
"Graph type and graph type arguments:\n"
" market <matrix-market-file-name>\n"
" Reads a Matrix-Market coordinate-formatted graph of\n"
" directed/undirected edges from STDIN (or from the\n"
" optionally-specified file).\n"
" rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
" Generate R-MAT graph as input\n"
" --rmat_scale=<vertex-scale>\n"
" --rmat_nodes=<number-nodes>\n"
" --rmat_edgefactor=<edge-factor>\n"
" --rmat_edges=<number-edges>\n"
" --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
" --rmat_seed=<seed>\n"
" rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n"
" Generate Random Geometry Graph as input\n"
" --rgg_scale=<vertex-scale>\n"
" --rgg_nodes=<number-nodes>\n"
" --rgg_thfactor=<threshold-factor>\n"
" --rgg_threshold=<threshold>\n"
" --rgg_vmultipiler=<vmultipiler>\n"
" --rgg_seed=<seed>\n\n"
"Optional arguments:\n"
"[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
"[--instrumented] Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty.\n"
" (a relative indicator of load imbalance.)\n"
"[--src=<Vertex-ID|randomize|largestdegree>]\n"
" Begins traversal from the source (Default: 0).\n"
" If randomize: from a random source vertex.\n"
" If largestdegree: from largest degree vertex.\n"
"[--quick] Skip the CPU reference validation process.\n"
"[--mark-pred] Keep both label info and predecessor info.\n"
"[--disable-size-check] Disable frontier queue size check.\n"
"[--grid-size=<grid size>] Maximum allowed grid size setting.\n"
"[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--in-sizing=<in/out_queue_scale_factor>]\n"
" Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--v] Print verbose per iteration debug info.\n"
"[--iteration-num=<num>] Number of runs to perform the test.\n"
"[--traversal-mode=<0|1>] Set traversal strategy, 0 for Load-Balanced\n"
" 1 for Dynamic-Cooperative (Default: dynamic\n"
" determine based on average degree).\n"
"[--partition_method=<random|biasrandom|clustered|metis>]\n"
" Choose partitioner (Default use random).\n"
"[--ref-file=<file_name>] Use pre-computed result in file to verify.\n"
"[--quiet] No output (unless --json is specified).\n"
"[--json] Output JSON-format statistics to STDOUT.\n"
"[--jsonfile=<name>] Output JSON-format statistics to file <name>\n"
"[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n"
" where name is auto-generated.\n"
);
}
/**
* @brief Displays the BC result (sigma value and BC value)
*
* @tparam Value
* @tparam SizeT
*
* @param[in] sigmas
* @param[in] bc_values
* @param[in] nodes
* @param[in] quiet
*/
template <
typename SizeT,
typename Value>
void DisplaySolution(
Value *sigmas,
Value *bc_values,
SizeT nodes,
bool quiet = false)
{
if (quiet) return;
if (nodes < 40)
{
printf("[");
for (SizeT i = 0; i < nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(sigmas[i]);
printf(",");
PrintValue(bc_values[i]);
printf(" ");
}
printf("]\n");
}
}
/******************************************************************************
* BC Testing Routines
*****************************************************************************/
/**
* @brief Graph edge properties (bundled properties)
*/
struct EdgeProperties
{
int weight;
};
/**
* @brief A simple CPU-based reference BC ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to graph we process on
* @param[in] bc_values Pointer to node bc value
* @param[in] sigmas Pointer to node sigma value
* @param[in] source_path Pointer to a vector to store CPU computed labels for each node
* @param[in] src VertexId of source node if there is any
* @param[in] quiet Don't print out anything to stdout
*/
template <
typename VertexId,
typename SizeT,
typename Value >
void ReferenceBC(
const Csr<VertexId, SizeT, Value> &graph,
Value *bc_values,
Value *sigmas,
VertexId *source_path,
VertexId src,
bool quiet = false)
{
typedef Coo<VertexId, Value> EdgeTupleType;
EdgeTupleType *coo = (EdgeTupleType*) malloc(sizeof(EdgeTupleType) * graph.edges);
if (src == -1)
{
// Perform full exact BC using BGL
using namespace boost;
typedef adjacency_list <setS, vecS, undirectedS, no_property,
EdgeProperties> Graph;
typedef Graph::vertex_descriptor Vertex;
typedef Graph::edge_descriptor Edge;
Graph G;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j)
{
add_edge(vertex(i, G), vertex(graph.column_indices[j], G), G);
}
}
typedef std::map<Edge, int> StdEdgeIndexMap;
StdEdgeIndexMap my_e_index;
typedef boost::associative_property_map< StdEdgeIndexMap > EdgeIndexMap;
EdgeIndexMap e_index(my_e_index);
int i = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
my_e_index.insert(std::pair<Edge, int>(edge, i));
++i;
}
// Define EdgeCentralityMap
std::vector< double > e_centrality_vec(boost::num_edges(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
EdgeIndexMap >
e_centrality_map(e_centrality_vec.begin(), e_index);
// Define VertexCentralityMap
typedef boost::property_map< Graph, boost::vertex_index_t>::type
VertexIndexMap;
VertexIndexMap v_index = get(boost::vertex_index, G);
std::vector< double > v_centrality_vec(boost::num_vertices(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
VertexIndexMap>
v_centrality_map(v_centrality_vec.begin(), v_index);
// Perform BC
CpuTimer cpu_timer;
cpu_timer.Start();
brandes_betweenness_centrality(G, v_centrality_map, e_centrality_map);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
BGL_FORALL_VERTICES(vertex, G, Graph)
{
bc_values[vertex] = (Value)v_centrality_map[vertex];
}
int idx = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
coo[idx].row = source(edge, G);
coo[idx].col = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
coo[idx].col = source(edge, G);
coo[idx].row = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
}
std::stable_sort(coo, coo + graph.edges,
RowFirstTupleCompare<EdgeTupleType>);
if (!quiet)
{
printf("CPU BC finished in %lf msec.", elapsed);
}
}
else
{
// Simple BFS pass to get single pass BC
// VertexId *source_path = new VertexId[graph.nodes];
// Initialize distances
for (VertexId i = 0; i < graph.nodes; ++i)
{
source_path[i] = -1;
bc_values[i] = 0;
sigmas[i] = 0;
}
source_path[src] = 0;
VertexId search_depth = 0;
sigmas[src] = 1;
// Initialize queue for managing previously-discovered nodes
std::deque<VertexId> frontier;
frontier.push_back(src);
//
// Perform one pass of BFS for one source
//
CpuTimer cpu_timer;
cpu_timer.Start();
while (!frontier.empty())
{
// Dequeue node from frontier
VertexId dequeued_node = frontier.front();
frontier.pop_front();
VertexId neighbor_dist = source_path[dequeued_node] + 1;
// Locate adjacency list
int edges_begin = graph.row_offsets[dequeued_node];
int edges_end = graph.row_offsets[dequeued_node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge)
{
// Lookup neighbor and enqueue if undiscovered
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == -1)
{
source_path[neighbor] = neighbor_dist;
sigmas[neighbor] += sigmas[dequeued_node];
if (search_depth < neighbor_dist)
{
search_depth = neighbor_dist;
}
frontier.push_back(neighbor);
}
else
{
if (source_path[neighbor] == source_path[dequeued_node] + 1)
sigmas[neighbor] += sigmas[dequeued_node];
}
}
}
search_depth++;
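        // Backward phase of Brandes' algorithm: sweep the BFS levels from the
        // deepest back to level 1, accumulating each node's dependency from
        // its successors on the next-deeper level.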
for (int iter = search_depth - 2; iter > 0; --iter)
{
int cur_level = 0;
for (int node = 0; node < graph.nodes; ++node)
{
if (source_path[node] == iter)
{
++cur_level;
int edges_begin = graph.row_offsets[node];
int edges_end = graph.row_offsets[node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge)
{
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == iter + 1)
{
bc_values[node] +=
1.0f * sigmas[node] / sigmas[neighbor] *
(1.0f + bc_values[neighbor]);
}
}
}
}
}
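        // Halve the accumulated values to match the undirected-graph BC
        // convention; the GPU side applies the same 0.5 scaling.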
for (int i = 0; i < graph.nodes; ++i)
{
bc_values[i] *= 0.5f;
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
if (!quiet)
{
printf("CPU BC finished in %lf msec. Search depth: %d\n",
elapsed, search_depth);
}
//delete[] source_path;
}
free(coo);
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] info Pointer to info contains parameters and statistics.
*
* \return cudaError_t object which indicates the success of
* all CUDA function calls.
*/
template <
typename VertexId,
typename SizeT,
typename Value>
cudaError_t RunTests(Info<VertexId, SizeT, Value> *info)
{
typedef BCProblem < VertexId,
SizeT,
Value,
true> // MARK_PREDECESSORS
Problem; //does not use double buffer
typedef BCEnactor < Problem>
//INSTRUMENT,
//DEBUG,
//SIZE_CHECK >
Enactor;
// parse configurations from mObject info
Csr<VertexId, SizeT, Value> *graph = info->csr_ptr;
VertexId src = info->info["source_vertex" ].get_int64();
bool quiet_mode = info->info["quiet_mode" ].get_bool();
int max_grid_size = info->info["max_grid_size" ].get_int();
int num_gpus = info->info["num_gpus" ].get_int();
double max_queue_sizing = info->info["max_queue_sizing" ].get_real();
double max_queue_sizing1 = info->info["max_queue_sizing1" ].get_real();
double max_in_sizing = info->info["max_in_sizing" ].get_real();
std::string partition_method = info->info["partition_method" ].get_str();
double partition_factor = info->info["partition_factor" ].get_real();
int partition_seed = info->info["partition_seed" ].get_int();
bool quick_mode = info->info["quick_mode" ].get_bool();
bool stream_from_host = info->info["stream_from_host" ].get_bool();
bool instrument = info->info["instrument" ].get_bool ();
bool debug = info->info["debug_mode" ].get_bool ();
bool size_check = info->info["size_check" ].get_bool ();
int iterations = info->info["num_iteration" ].get_int();
std::string src_type = info->info["source_type" ].get_str ();
int src_seed = info->info["source_seed" ].get_int ();
std::string ref_filename = info->info["ref_filename" ].get_str();
int communicate_latency = info->info["communicate_latency"].get_int ();
float communicate_multipy = info->info["communicate_multipy"].get_real();
int expand_latency = info->info["expand_latency" ].get_int ();
int subqueue_latency = info->info["subqueue_latency" ].get_int ();
int fullqueue_latency = info->info["fullqueue_latency" ].get_int ();
int makeout_latency = info->info["makeout_latency" ].get_int ();
std::string traversal_mode = info->info["traversal_mode" ].get_str ();
if (traversal_mode == "TWC") traversal_mode = "LB";
if (max_queue_sizing < 0) max_queue_sizing = 1.2;
if (max_in_sizing < 0) max_in_sizing = 1.1;
if (communicate_multipy > 1) max_in_sizing *= communicate_multipy;
CpuTimer cpu_timer;
cudaError_t retval = cudaSuccess;
cpu_timer.Start();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
cudaStream_t *streams = (cudaStream_t*)info->streams;
// Allocate host-side array (for both reference and GPU-computed results)
Value *reference_bc_values = new Value [graph->nodes];
Value *reference_sigmas = new Value [graph->nodes];
VertexId *reference_labels = new VertexId[graph->nodes];
Value *h_sigmas = new Value [graph->nodes];
Value *h_bc_values = new Value [graph->nodes];
VertexId *h_labels = new VertexId[graph->nodes];
Value *reference_check_bc_values = (quick_mode) ? NULL : reference_bc_values;
Value *reference_check_sigmas = (quick_mode || (src == -1)) ? NULL : reference_sigmas;
VertexId *reference_check_labels = (quick_mode || (src == -1)) ? NULL : reference_labels;
size_t *org_size = new size_t[num_gpus];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
if (retval = util::SetDevice(gpu_idx[gpu])) return retval;
cudaMemGetInfo(&(org_size[gpu]), &dummy);
}
Problem* problem = new Problem(false); // allocate problem on GPU
if (retval = util::GRError(problem->Init(
stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"BC Problem Initialization Failed", __FILE__, __LINE__))
return retval;
Enactor* enactor = new Enactor(
num_gpus, gpu_idx, instrument, debug, size_check); // enactor map
if (retval = util::GRError(enactor->Init(
context, problem, max_grid_size, traversal_mode),
"BC Enactor init failed", __FILE__, __LINE__))
return retval;
enactor -> communicate_latency = communicate_latency;
enactor -> communicate_multipy = communicate_multipy;
enactor -> expand_latency = expand_latency;
enactor -> subqueue_latency = subqueue_latency;
enactor -> fullqueue_latency = fullqueue_latency;
enactor -> makeout_latency = makeout_latency;
if (retval = util::SetDevice(gpu_idx[0])) return retval;
if (retval = util::latency::Test(
streams[0], problem -> data_slices[0] -> latency_data,
communicate_latency,
communicate_multipy,
expand_latency,
subqueue_latency,
fullqueue_latency,
makeout_latency)) return retval;
cpu_timer.Stop();
info -> info["preprocess_time"] = cpu_timer.ElapsedMillis();
// perform BC
double total_elapsed = 0.0;
double single_elapsed = 0.0;
double max_elapsed = 0.0;
double min_elapsed = 1e10;
json_spirit::mArray process_times;
VertexId start_src, end_src;
if (src_type == "random2")
{
if (src_seed == -1) src_seed = time(NULL);
if (!quiet_mode)
printf("src_seed = %d\n", src_seed);
srand(src_seed);
}
if (!quiet_mode)
printf("Using traversal-mode %s\n", traversal_mode.c_str());
for (int iter = 0; iter < iterations; ++iter)
{
//if (!quiet_mode)
//{
// printf("iteration:%d\n", iter);
//}
if (src_type == "random2")
{
bool src_valid = false;
while (!src_valid)
{
src = rand() % graph -> nodes;
if (graph -> row_offsets[src] != graph -> row_offsets[src+1])
src_valid = true;
}
}
if (src == -1)
{
start_src = 0;
end_src = graph->nodes;
}
else
{
start_src = src;
end_src = src + 1;
}
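        // Zero the per-vertex BC accumulators on every GPU before this iteration.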
for (int gpu = 0; gpu < num_gpus; gpu++)
{
if (retval = util::SetDevice(gpu_idx[gpu])) return retval;
util::MemsetKernel <<< 128, 128>>>(
problem -> data_slices[gpu] -> bc_values.GetPointer(util::DEVICE),
(Value)0.0, problem->sub_graphs[gpu].nodes);
}
if (retval = util::GRError(problem->Reset(
0, enactor->GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"BC Problem Data Reset Failed", __FILE__, __LINE__))
return retval;
if (!quiet_mode)
{
printf("__________________________\n"); fflush(stdout);
}
single_elapsed = 0;
for (VertexId i = start_src; i < end_src; ++i)
{
if (retval = util::GRError(problem->Reset(
i, enactor->GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"BC Problem Data Reset Failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor ->Reset(),
"BC Enactor Reset failed", __FILE__, __LINE__))
return retval;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
if (retval = util::SetDevice(gpu_idx[gpu]))
return retval;
if (retval = util::GRError(cudaDeviceSynchronize(),
"cudaDeviceSynchronize failed", __FILE__, __LINE__))
return retval;
}
cpu_timer.Start();
if (retval = util::GRError(enactor ->Enact(i, traversal_mode),
"BC Problem Enact Failed", __FILE__, __LINE__))
return retval;
cpu_timer.Stop();
single_elapsed += cpu_timer.ElapsedMillis();
}
total_elapsed += single_elapsed;
process_times.push_back(single_elapsed);
if (single_elapsed > max_elapsed) max_elapsed = single_elapsed;
if (single_elapsed < min_elapsed) min_elapsed = single_elapsed;
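        // Apply the 0.5 undirected-graph scaling to the BC values accumulated on each GPU.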
for (int gpu = 0; gpu < num_gpus; gpu++)
{
if (retval = util::SetDevice(gpu_idx[gpu])) return retval;
util::MemsetScaleKernel <<< 128, 128>>>(
problem -> data_slices[gpu] -> bc_values.GetPointer(util::DEVICE),
(Value)0.5, problem -> sub_graphs[gpu].nodes);
}
if (!quiet_mode)
{
printf("--------------------------\n"
"iteration %d elapsed: %lf ms, src = %lld\n",
iter, single_elapsed, (long long)src);
fflush(stdout);
}
}
total_elapsed /= iterations;
info -> info["process_times"] = process_times;
info -> info["min_process_time"] = min_elapsed;
info -> info["max_process_time"] = max_elapsed;
// compute reference CPU BC solution for source-distance
if (!quick_mode)
{
if (ref_filename.empty())
{
if (!quiet_mode) { printf("Computing reference value ...\n"); }
ReferenceBC(
*graph,
reference_check_bc_values,
reference_check_sigmas,
reference_check_labels,
src,
quiet_mode);
if (!quiet_mode) { printf("\n"); }
}
else
{
std::ifstream fin;
fin.open(ref_filename.c_str(), std::ios::binary);
for (int i = 0; i < graph->nodes; ++i)
{
fin.read(reinterpret_cast<char*>(&reference_check_bc_values[i]), sizeof(Value));
}
fin.close();
}
}
cpu_timer.Start();
// Copy out results
if (retval = util::GRError(problem -> Extract(
h_sigmas, h_bc_values, h_labels),
"BC Problem Data Extraction Failed", __FILE__, __LINE__))
return retval;
// Verify the result
if (!quick_mode)
{
//util::cpu_mt::PrintCPUArray<SizeT, Value>("reference_check_bc_values", reference_check_bc_values, graph->nodes);
//util::cpu_mt::PrintCPUArray<SizeT, Value>("bc_values", h_bc_values, graph->nodes);
if (!quiet_mode) { printf("Validity BC Value: "); }
int num_error = CompareResults(
h_bc_values, reference_check_bc_values,
graph->nodes, true, quiet_mode);
if (num_error > 0)
{
if (!quiet_mode) { printf("Number of errors occurred: %d\n", num_error); }
}
if (!quiet_mode) { printf("\n"); }
}
if (reference_check_sigmas != NULL)
{
if (!quiet_mode) { printf("Validity Sigma: "); }
int num_error = CompareResults(
h_sigmas, reference_check_sigmas,
graph->nodes, true, quiet_mode);
if (num_error > 0)
{
if (!quiet_mode)
{
printf("Number of errors occurred: %d\n", num_error);
}
}
if (!quiet_mode) { printf("\n"); }
}
if (reference_check_labels != NULL)
{
if (!quiet_mode) { printf("Validity labels: "); }
int num_error = CompareResults(
h_labels, reference_check_labels,
graph->nodes, true, quiet_mode);
if (num_error > 0)
{
if (!quiet_mode)
{
printf("Number of errors occurred: %d\n", num_error);
}
}
if (!quiet_mode) { printf("\n"); }
}
if (!quiet_mode)
{
// Display Solution
DisplaySolution(h_sigmas, h_bc_values, graph->nodes);
}
info->ComputeTraversalStats( // compute running statistics
enactor->enactor_stats.GetPointer(), total_elapsed, h_labels);
if (!quiet_mode)
{
printf("\n\tMemory Usage(B)\t");
        for (int gpu = 0; gpu < num_gpus; gpu++)
        {
            if (num_gpus > 1 && gpu != 0)
                printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu);
            else
                printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
        }
if (num_gpus > 1) printf(" #keys%d", num_gpus);
printf("\n");
double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t gpu_free, dummy;
cudaSetDevice(gpu_idx[gpu]);
cudaMemGetInfo(&gpu_free, &dummy);
printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free);
for (int i = 0; i < num_gpus; i++)
{
for (int j = 0; j < 2; j++)
{
SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / (num_gpus > 1 ? problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes);
if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor;
}
if (num_gpus > 1 && i != 0 )
for (int t = 0; t < 2; t++)
{
SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i];
if (factor > max_in_sizing_) max_in_sizing_ = factor;
}
}
if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize()));
printf("\n");
}
printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]);
if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_);
printf("\n");
}
// Cleanup
if (org_size ) {delete[] org_size ; org_size = NULL;}
if (problem ) {delete problem ; problem = NULL;}
if (enactor ) {delete enactor ; enactor = NULL;}
if (reference_sigmas ) {delete[] reference_sigmas ; reference_sigmas = NULL;}
if (reference_bc_values ) {delete[] reference_bc_values ; reference_bc_values = NULL;}
if (reference_labels ) {delete[] reference_labels ; reference_labels = NULL;}
if (h_sigmas ) {delete[] h_sigmas ; h_sigmas = NULL;}
if (h_bc_values ) {delete[] h_bc_values ; h_bc_values = NULL;}
if (h_labels ) {delete[] h_labels ; h_labels = NULL;}
cpu_timer.Stop();
info -> info["postprocess_time"] = cpu_timer.ElapsedMillis();
return retval;
}
/******************************************************************************
* Main
******************************************************************************/
template <
typename VertexId,
typename SizeT,
typename Value>
int main_(CommandLineArgs* args)
{
CpuTimer cpu_timer, cpu_timer2;
cpu_timer.Start();
Csr <VertexId, SizeT, Value> csr(false); // graph we process on
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
// graph construction or generation related parameters
info -> info["undirected"] = true; // require undirected input graph
cpu_timer2.Start();
info -> Init("BC", *args, csr); // initialize Info structure
cpu_timer2.Stop();
info -> info["load_time"] = cpu_timer2.ElapsedMillis();
RunTests<VertexId, SizeT, Value>(info); // run test
cpu_timer.Stop();
info->info["total_time"] = cpu_timer.ElapsedMillis();
if (!(info->info["quiet_mode"].get_bool()))
{
info->DisplayStats(); // display collected statistics
}
info->CollectInfo(); // collected all the info and put into JSON mObject
return 0;
}
template <
typename VertexId, // the vertex identifier type, usually int or long long
    typename SizeT > // the size type, usually int or long long
int main_Value(CommandLineArgs *args)
{
// disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-Value"))
// return main_<VertexId, SizeT, double>(args);
// else
return main_<VertexId, SizeT, float >(args);
}
template <
typename VertexId>
int main_SizeT(CommandLineArgs *args)
{
// disabled to reduce compile time
if (args -> CheckCmdLineFlag("64bit-SizeT"))
return main_Value<VertexId, long long>(args);
else
return main_Value<VertexId, int >(args);
}
int main_VertexId(CommandLineArgs *args)
{
// disabled, because of filter smem size issue
//if (args -> CheckCmdLineFlag("64bit-VertexId"))
// return main_SizeT<long long>(args);
//else
return main_SizeT<int >(args);
}
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
{
Usage();
return 1;
}
return main_VertexId(&args);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
b55e09ef0f79f801045a46a7a73a10d9d550b25f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <exception>
#define CE(err) \
{ \
if (err != hipSuccess) \
{ \
std::printf("CUDA error in %s (%s:%d) - %s", \
__FUNCTION__, \
__FILE__, \
__LINE__, \
hipGetErrorString(err)); \
std::terminate(); \
} \
}
constexpr size_t operator"" KiB(size_t size)
{
return size * 1024;
}
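// Reverse an n-element array in place, staging it through a statically sized
// shared-memory buffer.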
__global__ void static_reverse(int* const d, int const n)
{
__shared__ int s[64];
int const t = threadIdx.x;
int const tr = n - t - 1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
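// Same reversal, but the shared-memory buffer is sized at launch time via the
// kernel's dynamic shared-memory argument.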
__global__ void dynamic_reverse(int* d, int n)
{
extern __shared__ int s[];
int const t = threadIdx.x;
int const tr = n - t - 1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
int main()
{
constexpr int N = 64;
static_assert(N <= 1024,
"N can be at most equal to the maximum number of threads in a block");
constexpr size_t shared_memory_capacity = 48KiB;
static_assert(N * sizeof(int) <= shared_memory_capacity,
"Shared memory array size exceeds shared memory capacity");
int a[N];
int r[N];
int d[N];
for (int i = 0; i < N; i++)
{
a[i] = i;
r[i] = N - i - 1;
d[i] = 0;
}
int* d_d;
CE(hipMalloc(&d_d, N * sizeof(int)));
// run version with static shared memory
CE(hipMemcpy(d_d, a, N * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( static_reverse), dim3(1), dim3(N), 0, 0, d_d, N);
CE(hipGetLastError());
CE(hipMemcpy(d, d_d, N * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < N; i++)
{
if (d[i] != r[i])
{
std::printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
}
// run dynamic shared memory version
CE(hipMemcpy(d_d, a, N * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( dynamic_reverse), dim3(1), dim3(N), N * sizeof(int), 0, d_d, N);
CE(hipGetLastError());
CE(hipMemcpy(d, d_d, N * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < N; i++)
{
if (d[i] != r[i])
{
std::printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
}
}
|
b55e09ef0f79f801045a46a7a73a10d9d550b25f.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <exception>
#define CE(err) \
{ \
if (err != cudaSuccess) \
{ \
std::printf("CUDA error in %s (%s:%d) - %s", \
__FUNCTION__, \
__FILE__, \
__LINE__, \
cudaGetErrorString(err)); \
std::terminate(); \
} \
}
constexpr size_t operator"" KiB(size_t size)
{
return size * 1024;
}
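// Reverse an n-element array in place, staging it through a statically sized
// shared-memory buffer.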
__global__ void static_reverse(int* const d, int const n)
{
__shared__ int s[64];
int const t = threadIdx.x;
int const tr = n - t - 1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
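// Same reversal, but the shared-memory buffer is sized at launch time via the
// kernel's dynamic shared-memory argument.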
__global__ void dynamic_reverse(int* d, int n)
{
extern __shared__ int s[];
int const t = threadIdx.x;
int const tr = n - t - 1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
int main()
{
constexpr int N = 64;
static_assert(N <= 1024,
"N can be at most equal to the maximum number of threads in a block");
constexpr size_t shared_memory_capacity = 48KiB;
static_assert(N * sizeof(int) <= shared_memory_capacity,
"Shared memory array size exceeds shared memory capacity");
int a[N];
int r[N];
int d[N];
for (int i = 0; i < N; i++)
{
a[i] = i;
r[i] = N - i - 1;
d[i] = 0;
}
int* d_d;
CE(cudaMalloc(&d_d, N * sizeof(int)));
// run version with static shared memory
CE(cudaMemcpy(d_d, a, N * sizeof(int), cudaMemcpyHostToDevice));
static_reverse<<<1, N>>>(d_d, N);
CE(cudaGetLastError());
CE(cudaMemcpy(d, d_d, N * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++)
{
if (d[i] != r[i])
{
std::printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
}
// run dynamic shared memory version
CE(cudaMemcpy(d_d, a, N * sizeof(int), cudaMemcpyHostToDevice));
dynamic_reverse<<<1, N, N * sizeof(int)>>>(d_d, N);
CE(cudaGetLastError());
CE(cudaMemcpy(d, d_d, N * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++)
{
if (d[i] != r[i])
{
std::printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
}
}
|
ec65a743f15cd84db8f0429f76c549a29546b7c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <read_gauge.h>
#include <gauge_field.h>
#include "gauge_force_quda.h"
#ifdef MULTI_GPU
#include "face_quda.h"
#endif
namespace quda {
#define GF_SITE_MATRIX_LOAD_TEX 1
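// Each block below instantiates one gauge-force kernel for a given precision and
// link reconstruction: the LOAD_*, RECONSTRUCT_MATRIX and related macros are set,
// "gauge_force_core.h" is included to emit GAUGE_FORCE_KERN_NAME, and the macros
// are undefined again before the next instantiation.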
//single precision, 12-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle_recon, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle_recon, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4
#define N_IN_FLOATN 4
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp12
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
//double precision, 12-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8
#define N_IN_FLOATN 2
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp12
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
//single precision, 18-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink0TexSingle, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink1TexSingle, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var,gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8
#define N_IN_FLOATN 2
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp18
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
//double precision, 18-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8
#define N_IN_FLOATN 2
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp18
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
void
gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length)
{
static int gauge_force_init_cuda_flag = 0;
if (gauge_force_init_cuda_flag){
return;
}
gauge_force_init_cuda_flag=1;
int* X = param->X;
int Vh = X[0]*X[1]*X[2]*X[3]/2;
fat_force_const_t gf_h;
gf_h.path_max_length = path_max_length;
#ifdef MULTI_GPU
int Vh_ex = (X[0]+4)*(X[1]+4)*(X[2]+4)*(X[3]+4)/2;
gf_h.site_ga_stride = param->site_ga_pad + Vh_ex;
#else
gf_h.site_ga_stride = param->site_ga_pad + Vh;
#endif
gf_h.mom_ga_stride = param->mom_ga_pad + Vh;
hipMemcpyToSymbol(gf, &gf_h, sizeof(fat_force_const_t));
}
class GaugeForceCuda : public Tunable {
private:
cudaGaugeField &mom;
const int dir;
const double &eb3;
const cudaGaugeField &link;
const int *input_path;
const int *length;
const void *path_coeff;
const int num_paths;
const kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
GaugeForceCuda(cudaGaugeField &mom, const int dir, const double &eb3, const cudaGaugeField &link,
const int *input_path, const int *length, const void *path_coeff,
const int num_paths, const kernel_param_t &kparam) :
mom(mom), dir(dir), eb3(eb3), link(link), input_path(input_path), length(length),
path_coeff(path_coeff), num_paths(num_paths), kparam(kparam) {
if(link.Precision() == QUDA_DOUBLE_PRECISION){
hipBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2);
hipBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2);
}else{ //QUDA_SINGLE_PRECISION
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
hipBindTexture(0, siteLink0TexSingle, link.Even_p(), link.Bytes()/2);
hipBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2);
}else{//QUDA_RECONSTRUCT_12
hipBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2);
hipBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2);
}
}
}
virtual ~GaugeForceCuda() {
// unbind the textures that were bound in the constructor
if(link.Precision() == QUDA_DOUBLE_PRECISION){
hipUnbindTexture(siteLink0TexDouble);
hipUnbindTexture(siteLink1TexDouble);
}else{ //QUDA_SINGLE_PRECISION
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
hipUnbindTexture(siteLink0TexSingle);
hipUnbindTexture(siteLink1TexSingle);
}else{//QUDA_RECONSTRUCT_12
hipUnbindTexture(siteLink0TexSingle_recon);
hipUnbindTexture(siteLink1TexSingle_recon);
}
}
}
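// apply() launches the even-parity (<0>) and odd-parity (<1>) kernels back to back;
// note that they are issued on the default stream, so the `stream` argument is unused.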
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(link.Precision() == QUDA_DOUBLE_PRECISION){
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp18<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp18<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
}else{ //QUDA_RECONSTRUCT_12
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp12<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp12<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
}
}else{ //QUDA_SINGLE_PRECISION
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp18<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float2*)link.Even_p(), (float2*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp18<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float2*)link.Even_p(), (float2*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
}else{ //QUDA_RECONSTRUCT_12
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp12<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float4*)link.Even_p(), (float4*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
//odd
/* The reason we do not switch the even/odd function input parameters and the texture binding
* is that we use the oddbit to decide where to load in the kernel function
*/
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp12<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float4*)link.Even_p(), (float4*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
}
}
}
void preTune() { mom.backup(); }
void postTune() { mom.restore(); }
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // FIXME: add flops counter
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << link.X()[0] << "x";
vol << link.X()[1] << "x";
vol << link.X()[2] << "x";
vol << link.X()[3] << "x";
aux << "threads=" << link.Volume() << ",prec=" << link.Precision();
aux << "stride=" << link.Stride() << ",recon=" << link.Reconstruct();
aux << "dir=" << dir << "num_paths=" << num_paths;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
};
void
gauge_force_cuda_dir(cudaGaugeField& cudaMom, const int dir, const double eb3, const cudaGaugeField& cudaSiteLink,
const QudaGaugeParam* param, int** input_path, const int* length, const void* path_coeff,
const int num_paths, const int max_length)
{
//input_path
size_t bytes = num_paths*max_length*sizeof(int);
int *input_path_d = (int *) device_malloc(bytes);
hipMemset(input_path_d, 0, bytes);
checkCudaError();
int* input_path_h = (int *) safe_malloc(bytes);
memset(input_path_h, 0, bytes);
for(int i=0; i < num_paths; i++) {
for(int j=0; j < length[i]; j++) {
input_path_h[i*max_length + j] = input_path[i][j];
}
}
hipMemcpy(input_path_d, input_path_h, bytes, hipMemcpyHostToDevice);
//length
int* length_d = (int *) device_malloc(num_paths*sizeof(int));
hipMemcpy(length_d, length, num_paths*sizeof(int), hipMemcpyHostToDevice);
//path_coeff
int gsize = param->cuda_prec;
void* path_coeff_d = device_malloc(num_paths*gsize);
hipMemcpy(path_coeff_d, path_coeff, num_paths*gsize, hipMemcpyHostToDevice);
//compute the gauge forces
int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3];
kernel_param_t kparam;
#ifdef MULTI_GPU
for(int i=0; i<4; i++) {
kparam.ghostDim[i] = commDimPartitioned(i);
}
#endif
kparam.threads = volume/2;
GaugeForceCuda gaugeForce(cudaMom, dir, eb3, cudaSiteLink, input_path_d,
length_d, path_coeff_d, num_paths, kparam);
gaugeForce.apply(0);
checkCudaError();
host_free(input_path_h);
device_free(input_path_d);
device_free(length_d);
device_free(path_coeff_d);
}
void
gauge_force_cuda(cudaGaugeField& cudaMom, double eb3, cudaGaugeField& cudaSiteLink,
QudaGaugeParam* param, int*** input_path,
int* length, void* path_coeff, int num_paths, int max_length)
{
for(int dir=0; dir < 4; dir++){
gauge_force_cuda_dir(cudaMom, dir, eb3, cudaSiteLink, param, input_path[dir],
length, path_coeff, num_paths, max_length);
}
}
} // namespace quda
|
ec65a743f15cd84db8f0429f76c549a29546b7c5.cu
|
#include <read_gauge.h>
#include <gauge_field.h>
#include "gauge_force_quda.h"
#ifdef MULTI_GPU
#include "face_quda.h"
#endif
namespace quda {
#define GF_SITE_MATRIX_LOAD_TEX 1
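// Each block below instantiates one gauge-force kernel for a given precision and
// link reconstruction: the LOAD_*, RECONSTRUCT_MATRIX and related macros are set,
// "gauge_force_core.h" is included to emit GAUGE_FORCE_KERN_NAME, and the macros
// are undefined again before the next instantiation.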
//single precision, 12-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle_recon, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle_recon, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4
#define N_IN_FLOATN 4
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp12
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
//double precision, 12-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8
#define N_IN_FLOATN 2
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp12
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
//single precision, 18-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink0TexSingle, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink1TexSingle, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var,gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8
#define N_IN_FLOATN 2
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp18
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
//double precision, 18-reconstruct
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride)
#endif
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride)
#define RECONSTRUCT_MATRIX(sign, var)
#define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8
#define N_IN_FLOATN 2
#define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp18
#include "gauge_force_core.h"
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
#undef DECLARE_LINK_VARS
#undef N_IN_FLOATN
#undef GAUGE_FORCE_KERN_NAME
void
gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length)
{
static int gauge_force_init_cuda_flag = 0;
if (gauge_force_init_cuda_flag){
return;
}
gauge_force_init_cuda_flag=1;
int* X = param->X;
int Vh = X[0]*X[1]*X[2]*X[3]/2;
fat_force_const_t gf_h;
gf_h.path_max_length = path_max_length;
#ifdef MULTI_GPU
int Vh_ex = (X[0]+4)*(X[1]+4)*(X[2]+4)*(X[3]+4)/2;
gf_h.site_ga_stride = param->site_ga_pad + Vh_ex;
#else
gf_h.site_ga_stride = param->site_ga_pad + Vh;
#endif
gf_h.mom_ga_stride = param->mom_ga_pad + Vh;
cudaMemcpyToSymbol(gf, &gf_h, sizeof(fat_force_const_t));
}
class GaugeForceCuda : public Tunable {
private:
cudaGaugeField &mom;
const int dir;
const double &eb3;
const cudaGaugeField &link;
const int *input_path;
const int *length;
const void *path_coeff;
const int num_paths;
const kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
GaugeForceCuda(cudaGaugeField &mom, const int dir, const double &eb3, const cudaGaugeField &link,
const int *input_path, const int *length, const void *path_coeff,
const int num_paths, const kernel_param_t &kparam) :
mom(mom), dir(dir), eb3(eb3), link(link), input_path(input_path), length(length),
path_coeff(path_coeff), num_paths(num_paths), kparam(kparam) {
if(link.Precision() == QUDA_DOUBLE_PRECISION){
cudaBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2);
cudaBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2);
}else{ //QUDA_SINGLE_PRECISION
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
cudaBindTexture(0, siteLink0TexSingle, link.Even_p(), link.Bytes()/2);
cudaBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2);
}else{//QUDA_RECONSTRUCT_12
cudaBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2);
cudaBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2);
}
}
}
virtual ~GaugeForceCuda() {
// unbind the textures that were bound in the constructor
if(link.Precision() == QUDA_DOUBLE_PRECISION){
cudaUnbindTexture(siteLink0TexDouble);
cudaUnbindTexture(siteLink1TexDouble);
}else{ //QUDA_SINGLE_PRECISION
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
cudaUnbindTexture(siteLink0TexSingle);
cudaUnbindTexture(siteLink1TexSingle);
}else{//QUDA_RECONSTRUCT_12
cudaUnbindTexture(siteLink0TexSingle_recon);
cudaUnbindTexture(siteLink1TexSingle_recon);
}
}
}
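// apply() launches the even-parity (<0>) and odd-parity (<1>) kernels back to back;
// note that they are issued on the default stream, so the `stream` argument is unused.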
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(link.Precision() == QUDA_DOUBLE_PRECISION){
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
parity_compute_gauge_force_kernel_dp18<0><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
parity_compute_gauge_force_kernel_dp18<1><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
}else{ //QUDA_RECONSTRUCT_12
parity_compute_gauge_force_kernel_dp12<0><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
parity_compute_gauge_force_kernel_dp12<1><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(),
dir, eb3,
(double2*)link.Even_p(), (double2*)link.Odd_p(),
input_path, length, (double*)path_coeff,
num_paths, kparam);
}
}else{ //QUDA_SINGLE_PRECISION
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
parity_compute_gauge_force_kernel_sp18<0><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float2*)link.Even_p(), (float2*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
parity_compute_gauge_force_kernel_sp18<1><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float2*)link.Even_p(), (float2*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
}else{ //QUDA_RECONSTRUCT_12
parity_compute_gauge_force_kernel_sp12<0><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float4*)link.Even_p(), (float4*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
//odd
/* The reason we do not switch the even/odd function input parameters and the texture binding
* is that we use the oddbit to decide where to load in the kernel function
*/
parity_compute_gauge_force_kernel_sp12<1><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(),
dir, eb3,
(float4*)link.Even_p(), (float4*)link.Odd_p(),
input_path, length, (float*)path_coeff,
num_paths, kparam);
}
}
}
void preTune() { mom.backup(); }
void postTune() { mom.restore(); }
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // FIXME: add flops counter
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << link.X()[0] << "x";
vol << link.X()[1] << "x";
vol << link.X()[2] << "x";
vol << link.X()[3] << "x";
aux << "threads=" << link.Volume() << ",prec=" << link.Precision();
aux << "stride=" << link.Stride() << ",recon=" << link.Reconstruct();
aux << "dir=" << dir << "num_paths=" << num_paths;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
};
void
gauge_force_cuda_dir(cudaGaugeField& cudaMom, const int dir, const double eb3, const cudaGaugeField& cudaSiteLink,
const QudaGaugeParam* param, int** input_path, const int* length, const void* path_coeff,
const int num_paths, const int max_length)
{
//input_path
size_t bytes = num_paths*max_length*sizeof(int);
int *input_path_d = (int *) device_malloc(bytes);
cudaMemset(input_path_d, 0, bytes);
checkCudaError();
int* input_path_h = (int *) safe_malloc(bytes);
memset(input_path_h, 0, bytes);
for(int i=0; i < num_paths; i++) {
for(int j=0; j < length[i]; j++) {
input_path_h[i*max_length + j] = input_path[i][j];
}
}
cudaMemcpy(input_path_d, input_path_h, bytes, cudaMemcpyHostToDevice);
//length
int* length_d = (int *) device_malloc(num_paths*sizeof(int));
cudaMemcpy(length_d, length, num_paths*sizeof(int), cudaMemcpyHostToDevice);
//path_coeff
int gsize = param->cuda_prec;
void* path_coeff_d = device_malloc(num_paths*gsize);
cudaMemcpy(path_coeff_d, path_coeff, num_paths*gsize, cudaMemcpyHostToDevice);
//compute the gauge forces
int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3];
kernel_param_t kparam;
#ifdef MULTI_GPU
for(int i=0; i<4; i++) {
kparam.ghostDim[i] = commDimPartitioned(i);
}
#endif
kparam.threads = volume/2;
GaugeForceCuda gaugeForce(cudaMom, dir, eb3, cudaSiteLink, input_path_d,
length_d, path_coeff_d, num_paths, kparam);
gaugeForce.apply(0);
checkCudaError();
host_free(input_path_h);
device_free(input_path_d);
device_free(length_d);
device_free(path_coeff_d);
}
void
gauge_force_cuda(cudaGaugeField& cudaMom, double eb3, cudaGaugeField& cudaSiteLink,
QudaGaugeParam* param, int*** input_path,
int* length, void* path_coeff, int num_paths, int max_length)
{
for(int dir=0; dir < 4; dir++){
gauge_force_cuda_dir(cudaMom, dir, eb3, cudaSiteLink, param, input_path[dir],
length, path_coeff, num_paths, max_length);
}
}
} // namespace quda
|
e8803d8baa0b39349b2eda9c451873803f0a0071.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/detail/column_utilities.hpp>
#include <jit/type.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/logical.h>
#include <numeric>
#include <sstream>
#include "cudf/detail/utilities/vector_factories.hpp"
#include "rmm/cuda_stream_view.hpp"
namespace cudf {
namespace test {
namespace {
// expand all non-null rows in a list column into a column of child row indices.
std::unique_ptr<column> generate_child_row_indices(lists_column_view const& c,
column_view const& row_indices)
{
// Example input
// List<int32_t>:
// Length : 7
// Offsets : 0, 3, 6, 8, 11, 14, 16, 19
// | | <-- non-null input rows
// Null count: 5
// 0010100
// 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7
// | | | | | <-- child rows of non-null rows
//
// Desired output: [6, 7, 11, 12, 13]
// compute total # of child row indices we will be emitting.
auto row_size_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
validity = c.null_mask(),
offsets = c.offsets().begin<offset_type>(),
offset = c.offset()] __device__(int index) {
// both null mask and offsets data are not pre-sliced. so we need to add the column offset to
// every incoming index.
auto const true_index = row_indices[index] + offset;
return !validity || cudf::bit_is_set(validity, true_index)
? (offsets[true_index + 1] - offsets[true_index])
: 0;
});
auto const output_size =
thrust::reduce(rmm::exec_policy(), row_size_iter, row_size_iter + row_indices.size());
// no output. done.
auto result =
cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED);
if (output_size == 0) { return result; }
// for all input rows, what position in the output column they will start at.
//
// output_row_start = [0, 0, 0, 2, 2, 5, 5]
// | | <-- non-null input rows
//
auto output_row_start = cudf::make_fixed_width_column(
data_type{type_id::INT32}, row_indices.size(), mask_state::UNALLOCATED);
thrust::exclusive_scan(rmm::exec_policy(),
row_size_iter,
row_size_iter + row_indices.size(),
output_row_start->mutable_view().begin<size_type>());
// fill result column with 1s
//
// result = [1, 1, 1, 1, 1]
//
thrust::generate(rmm::exec_policy(),
result->mutable_view().begin<size_type>(),
result->mutable_view().end<size_type>(),
[] __device__() { return 1; });
// scatter the output row positions into result buffer
//
// result = [6, 1, 11, 1, 1]
//
auto validity_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
validity = c.null_mask(),
offset = c.offset()] __device__(int index) {
auto const true_index = row_indices[index] + offset;
return !validity || cudf::bit_is_set(validity, true_index) ? 1 : 0;
});
auto output_row_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
offsets = c.offsets().begin<offset_type>(),
offset = c.offset(),
first_offset = cudf::detail::get_value<offset_type>(
c.offsets(), c.offset(), rmm::cuda_stream_default)] __device__(int index) {
auto const true_index = row_indices[index] + offset;
return offsets[true_index] - first_offset;
});
thrust::scatter_if(rmm::exec_policy(),
output_row_iter,
output_row_iter + row_indices.size(),
output_row_start->view().begin<size_type>(),
validity_iter,
result->mutable_view().begin<size_type>());
// generate keys for each output row
//
// result = [1, 1, 2, 2, 2]
//
auto keys =
cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED);
thrust::generate(rmm::exec_policy(),
keys->mutable_view().begin<size_type>(),
keys->mutable_view().end<size_type>(),
[] __device__() { return 0; });
thrust::scatter_if(rmm::exec_policy(),
validity_iter,
validity_iter + row_indices.size(),
output_row_start->view().begin<size_type>(),
validity_iter,
keys->mutable_view().begin<size_type>());
thrust::inclusive_scan(rmm::exec_policy(),
keys->view().begin<size_type>(),
keys->view().end<size_type>(),
keys->mutable_view().begin<size_type>());
// scan by key to generate final child row indices.
// input
// result = [6, 1, 11, 1, 1]
// keys = [1, 1, 2, 2, 2]
//
// output
// result = [6, 7, 11, 12, 13]
//
thrust::inclusive_scan_by_key(rmm::exec_policy(),
keys->view().begin<size_type>(),
keys->view().end<size_type>(),
result->view().begin<size_type>(),
result->mutable_view().begin<size_type>());
return result;
}
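// generate_child_row_indices() is used by both the property comparator and the value
// comparator below when recursing into the children of list columns.
// PROP_EXPECT_EQ: under QUIET a mismatch simply makes the comparator return false;
// otherwise the gtest failure is reported, FIRST_ERROR stops at the first mismatch,
// and ALL_ERRORS keeps comparing and only flips `result`.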
#define PROP_EXPECT_EQ(a, b) \
do { \
if (verbosity == debug_output_level::QUIET) { \
if (a != b) { return false; } \
} else { \
EXPECT_EQ(a, b); \
if (a != b) { \
if (verbosity == debug_output_level::FIRST_ERROR) { \
return false; \
} else { \
result = false; \
} \
} \
} \
} while (0)
template <bool check_exact_equality>
struct column_property_comparator {
bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs)
{
return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs;
}
size_type count_nulls(cudf::column_view const& c, cudf::column_view const& row_indices)
{
auto validity_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
validity = c.null_mask(),
offset = c.offset()] __device__(int index) {
// both null mask and offsets data are not pre-sliced. so we need to add the column offset
// to every incoming index.
auto const true_index = row_indices[index] + offset;
return !validity || cudf::bit_is_set(validity, true_index) ? 0 : 1;
});
return thrust::reduce(rmm::exec_policy(), validity_iter, validity_iter + row_indices.size());
}
bool compare_common(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
bool result = true;
if (check_exact_equality) {
PROP_EXPECT_EQ(lhs.type(), rhs.type());
} else {
PROP_EXPECT_EQ(types_equivalent(lhs.type(), rhs.type()), true);
}
// DISCUSSION: does this make sense, semantically?
auto const lhs_size = check_exact_equality ? lhs.size() : lhs_row_indices.size();
auto const rhs_size = check_exact_equality ? rhs.size() : rhs_row_indices.size();
PROP_EXPECT_EQ(lhs_size, rhs_size);
if (lhs_size > 0 && check_exact_equality) { PROP_EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
// DISCUSSION: does this make sense, semantically?
auto const lhs_null_count =
check_exact_equality ? lhs.null_count() : count_nulls(lhs, lhs_row_indices);
auto const rhs_null_count =
check_exact_equality ? rhs.null_count() : count_nulls(rhs, rhs_row_indices);
PROP_EXPECT_EQ(lhs_null_count, rhs_null_count);
// equivalent, but not exactly equal columns can have a different number of children if their
// sizes are both 0. Specifically, empty string columns may or may not have children.
if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) {
PROP_EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
return result;
}
template <typename T,
std::enable_if_t<!std::is_same<T, cudf::list_view>::value &&
!std::is_same<T, cudf::struct_view>::value>* = nullptr>
bool operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
return compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity);
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr>
bool operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; }
cudf::lists_column_view lhs_l(lhs);
cudf::lists_column_view rhs_l(rhs);
// recurse
auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default);
// note: if a column is all nulls or otherwise empty, no indices are generated and no recursion
// happens
auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices);
if (lhs_child_indices->size() > 0) {
auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default);
auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices);
return cudf::type_dispatcher(lhs_child.type(),
column_property_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
*lhs_child_indices,
*rhs_child_indices,
verbosity);
}
return true;
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::struct_view>::value>* = nullptr>
bool operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; }
structs_column_view l_scv(lhs);
structs_column_view r_scv(rhs);
for (size_type i = 0; i < lhs.num_children(); i++) {
column_view lhs_child = l_scv.get_sliced_child(i);
column_view rhs_child = r_scv.get_sliced_child(i);
if (!cudf::type_dispatcher(lhs_child.type(),
column_property_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
lhs_row_indices,
rhs_row_indices,
verbosity)) {
return false;
}
}
return true;
}
};
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs,
table_device_view d_rhs,
column_device_view lhs_row_indices_,
column_device_view rhs_row_indices_)
: comp(d_lhs, d_rhs), lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_)
{
}
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
return !comp(lhs_row_indices.element<size_type>(index),
rhs_row_indices.element<size_type>(index));
}
column_device_view lhs_row_indices;
column_device_view rhs_row_indices;
};
class corresponding_rows_not_equivalent {
table_device_view d_lhs;
table_device_view d_rhs;
column_device_view lhs_row_indices;
column_device_view rhs_row_indices;
public:
corresponding_rows_not_equivalent(table_device_view d_lhs,
table_device_view d_rhs,
column_device_view lhs_row_indices_,
column_device_view rhs_row_indices_)
: d_lhs(d_lhs),
d_rhs(d_rhs),
comp(d_lhs, d_rhs),
lhs_row_indices(lhs_row_indices_),
rhs_row_indices(rhs_row_indices_)
{
CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1,
"Unsupported number of columns");
}
struct typed_element_not_equivalent {
template <typename T>
__device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()(
column_device_view const& lhs,
column_device_view const& rhs,
size_type lhs_index,
size_type rhs_index)
{
if (lhs.is_valid(lhs_index) and rhs.is_valid(rhs_index)) {
T const x = lhs.element<T>(lhs_index);
T const y = rhs.element<T>(rhs_index);
// Must handle inf and nan separately
if (std::isinf(x) || std::isinf(y)) {
return x != y; // comparison of (inf==inf) returns true
} else if (std::isnan(x) || std::isnan(y)) {
return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false
} else {
constexpr int ulp = 4; // ulp = unit of least precision, value taken from google test
T const abs_x_minus_y = std::abs(x - y);
return abs_x_minus_y >= std::numeric_limits<T>::min() &&
abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp;
}
} else {
// if either is null, then the inequality was checked already
return true;
}
}
template <typename T, typename... Args>
__device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args...)
{
// Non-floating point inequality is checked already
return true;
}
};
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
auto const lhs_index = lhs_row_indices.element<size_type>(index);
auto const rhs_index = rhs_row_indices.element<size_type>(index);
if (not comp(lhs_index, rhs_index)) {
auto lhs_col = this->d_lhs.column(0);
auto rhs_col = this->d_rhs.column(0);
return type_dispatcher(
lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, lhs_index, rhs_index);
}
return false;
}
};
// Stringify the inconsistent values resulting from the element-wise comparison of two columns
std::string stringify_column_differences(cudf::device_span<int const> differences,
column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty");
std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : "";
// move the differences to the host.
auto h_differences = cudf::detail::make_host_vector_sync(differences);
if (verbosity == debug_output_level::ALL_ERRORS) {
std::ostringstream buffer;
buffer << depth_str << "differences:" << std::endl;
auto source_table = cudf::table_view({lhs, rhs});
auto diff_column =
fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end());
auto diff_table = cudf::gather(source_table, diff_column);
// Need to pull back the differences
auto const h_left_strings = to_strings(diff_table->get_column(0));
auto const h_right_strings = to_strings(diff_table->get_column(1));
for (size_t i = 0; i < h_differences.size(); ++i)
buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs["
<< h_differences[i] << "] = " << h_right_strings[i] << std::endl;
return buffer.str();
} else {
auto const index = h_differences[0]; // only stringify first difference
auto const lhs_index =
cudf::detail::get_value<size_type>(lhs_row_indices, index, rmm::cuda_stream_default);
auto const rhs_index =
cudf::detail::get_value<size_type>(rhs_row_indices, index, rmm::cuda_stream_default);
auto diff_lhs = cudf::detail::slice(lhs, lhs_index, lhs_index + 1);
auto diff_rhs = cudf::detail::slice(rhs, rhs_index, rhs_index + 1);
return depth_str + "first difference: " + "lhs[" + std::to_string(index) +
"] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) +
"] = " + to_string(diff_rhs, "");
}
}
// non-nested column types
template <typename T, bool check_exact_equality>
struct column_comparator_impl {
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
auto d_lhs_row_indices = cudf::column_device_view::create(lhs_row_indices);
auto d_rhs_row_indices = cudf::column_device_view::create(rhs_row_indices);
using ComparatorType = std::conditional_t<check_exact_equality,
corresponding_rows_unequal,
corresponding_rows_not_equivalent>;
auto differences = rmm::device_uvector<int>(
lhs.size(), rmm::cuda_stream_default); // worst case: everything different
auto input_iter = thrust::make_counting_iterator(0);
auto diff_iter =
thrust::copy_if(rmm::exec_policy(),
input_iter,
input_iter + lhs_row_indices.size(),
differences.begin(),
ComparatorType(*d_lhs, *d_rhs, *d_lhs_row_indices, *d_rhs_row_indices));
differences.resize(thrust::distance(differences.begin(), diff_iter),
rmm::cuda_stream_default); // shrink back down
if (not differences.is_empty()) {
if (verbosity != debug_output_level::QUIET) {
// GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda.
[&]() {
GTEST_FAIL() << stringify_column_differences(
differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth);
}();
}
return false;
}
return true;
}
};
// forward declaration for nested-type recursion.
template <bool check_exact_equality>
struct column_comparator;
// specialization for list columns
template <bool check_exact_equality>
struct column_comparator_impl<list_view, check_exact_equality> {
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
lists_column_view lhs_l(lhs);
lists_column_view rhs_l(rhs);
CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(), "List column size mismatch");
if (lhs_row_indices.is_empty()) { return true; }
// worst case - everything is different
rmm::device_uvector<int> differences(lhs_row_indices.size(), rmm::cuda_stream_default);
// compare offsets, taking slicing into account
// left side
size_type lhs_shift =
cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), rmm::cuda_stream_default);
auto lhs_offsets = thrust::make_transform_iterator(
lhs_l.offsets().begin<size_type>() + lhs_l.offset(),
[lhs_shift] __device__(size_type offset) { return offset - lhs_shift; });
auto lhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// right side
size_type rhs_shift =
cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), rmm::cuda_stream_default);
auto rhs_offsets = thrust::make_transform_iterator(
rhs_l.offsets().begin<size_type>() + rhs_l.offset(),
[rhs_shift] __device__(size_type offset) { return offset - rhs_shift; });
auto rhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// when checking for equivalency, we can't compare offset values directly, we can only
// compare lengths of the rows, and only if valid. as a concrete example, you could have two
// equivalent columns with the following data:
//
// column A
// offsets = [0, 3, 5, 7]
// validity = [0, 1, 1, 1]
//
// column B
// offsets = [0, 0, 2, 4]
// validity = [0, 1, 1, 1]
//
// Row 0 in column A happens to have a positive length, even though the row is null, but column
// B does not. So the offsets for the remaining valid rows are fundamentally different even
// though the row lengths are the same.
//
auto input_iter = thrust::make_counting_iterator(0);
auto diff_iter = thrust::copy_if(
rmm::exec_policy(),
input_iter,
input_iter + lhs_row_indices.size(),
differences.begin(),
[lhs_offsets,
rhs_offsets,
lhs_valids,
rhs_valids,
lhs_indices = lhs_row_indices.begin<size_type>(),
rhs_indices = rhs_row_indices.begin<size_type>()] __device__(size_type index) {
auto const lhs_index = lhs_indices[index];
auto const rhs_index = rhs_indices[index];
// check for validity match
if (lhs_valids[lhs_index] != rhs_valids[rhs_index]) { return true; }
// if the row is valid, check that the length of the list is the same. do this
// for both the equivalency and exact equality checks.
if (lhs_valids[lhs_index] && ((lhs_offsets[lhs_index + 1] - lhs_offsets[lhs_index]) !=
(rhs_offsets[rhs_index + 1] - rhs_offsets[rhs_index]))) {
return true;
}
// if validity matches -and- is false, we can ignore the actual offset values. this
// is technically not checking "equal()", but it's how the non-list code path handles it
if (!lhs_valids[lhs_index]) { return false; }
// if checking exact equality, compare the actual offset values
if (check_exact_equality && lhs_offsets[lhs_index] != rhs_offsets[rhs_index]) {
return true;
}
return false;
});
differences.resize(thrust::distance(differences.begin(), diff_iter),
rmm::cuda_stream_default); // shrink back down
if (not differences.is_empty()) {
if (verbosity != debug_output_level::QUIET) {
// GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda.
[&]() {
GTEST_FAIL() << stringify_column_differences(
differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth);
}();
}
return false;
}
// recurse.
auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default);
// note: if a column is all nulls or otherwise empty, no indices are generated and no recursion
// happens
auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices);
if (lhs_child_indices->size() > 0) {
auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default);
auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices);
return cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
*lhs_child_indices,
*rhs_child_indices,
verbosity,
depth + 1);
}
return true;
}
};
template <bool check_exact_equality>
struct column_comparator_impl<struct_view, check_exact_equality> {
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
structs_column_view l_scv(lhs);
structs_column_view r_scv(rhs);
for (size_type i = 0; i < lhs.num_children(); i++) {
column_view lhs_child = l_scv.get_sliced_child(i);
column_view rhs_child = r_scv.get_sliced_child(i);
if (!cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
lhs_row_indices,
rhs_row_indices,
verbosity,
depth + 1)) {
return false;
}
}
return true;
}
};
template <bool check_exact_equality>
struct column_comparator {
template <typename T>
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth = 0)
{
CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(),
"Mismatch in row counts to compare");
// compare properties
if (!cudf::type_dispatcher(lhs.type(),
column_property_comparator<check_exact_equality>{},
lhs,
rhs,
lhs_row_indices,
rhs_row_indices,
verbosity)) {
return false;
}
// compare values
column_comparator_impl<T, check_exact_equality> comparator{};
return comparator(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth);
}
};
std::unique_ptr<column> generate_all_row_indices(size_type num_rows)
{
auto indices =
cudf::make_fixed_width_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED);
thrust::sequence(rmm::exec_policy(),
indices->mutable_view().begin<size_type>(),
indices->mutable_view().end<size_type>(),
0);
return indices;
}
} // namespace
/**
* @copydoc cudf::test::expect_column_properties_equal
*/
bool expect_column_properties_equal(column_view const& lhs,
column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_property_comparator<true>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_column_properties_equivalent
*/
bool expect_column_properties_equivalent(column_view const& lhs,
column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_property_comparator<false>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_columns_equal
*/
bool expect_columns_equal(cudf::column_view const& lhs,
cudf::column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_comparator<true>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_columns_equivalent
*/
bool expect_columns_equivalent(cudf::column_view const& lhs,
cudf::column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_comparator<false>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_equal_buffers
*/
void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes)
{
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs));
}
/**
* @copydoc cudf::test::bitmask_to_host
*/
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c)
{
if (c.nullable()) {
auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
std::vector<bitmask_type> host_bitmask(num_bitmasks);
if (c.offset() == 0) {
CUDA_TRY(hipMemcpy(host_bitmask.data(),
c.null_mask(),
num_bitmasks * sizeof(bitmask_type),
hipMemcpyDeviceToHost));
} else {
auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size());
CUDA_TRY(hipMemcpy(host_bitmask.data(),
mask.data(),
num_bitmasks * sizeof(bitmask_type),
hipMemcpyDeviceToHost));
}
return host_bitmask;
} else {
return std::vector<bitmask_type>{};
}
}
namespace {
template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
return std::to_string(value);
}
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
std::ostringstream o;
o << std::setprecision(std::numeric_limits<T>::max_digits10) << value;
return o.str();
}
static auto duration_suffix(cudf::duration_D) { return " days"; }
static auto duration_suffix(cudf::duration_s) { return " seconds"; }
static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; }
static auto duration_suffix(cudf::duration_us) { return " microseconds"; }
static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; }
std::string get_nested_type_str(cudf::column_view const& view)
{
if (view.type().id() == cudf::type_id::LIST) {
lists_column_view lcv(view);
return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">";
}
if (view.type().id() == cudf::type_id::STRUCT) {
std::ostringstream out;
out << cudf::jit::get_type_name(view.type()) + "<";
std::transform(view.child_begin(),
view.child_end(),
std::ostream_iterator<std::string>(out, ","),
[&out](auto const col) { return get_nested_type_str(col); });
out << ">";
return out.str();
}
return cudf::jit::get_type_name(view.type());
}
template <typename NestedColumnView>
std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ")
{
column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index);
CUDF_EXPECTS(offsets.type().id() == type_id::INT32,
"Column does not appear to be an offsets column");
CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!");
size_type output_size = c.size() + 1;
// the first offset value to normalize everything against
size_type first =
cudf::detail::get_value<size_type>(offsets, c.offset(), rmm::cuda_stream_default);
rmm::device_uvector<size_type> shifted_offsets(output_size, rmm::cuda_stream_default);
// normalize the offset values for the column offset
size_type const* d_offsets = offsets.head<size_type>() + c.offset();
thrust::transform(
rmm::exec_policy(),
d_offsets,
d_offsets + output_size,
shifted_offsets.begin(),
[first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); });
auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets);
std::ostringstream buffer;
for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) {
buffer << h_shifted_offsets[idx];
if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; }
}
return buffer.str();
}
struct column_view_printer {
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el);
});
}
}
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
if (col_as_strings->size() == 0) { return; }
this->template operator()<cudf::string_view>(*col_as_strings, out, indent);
}
template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
auto const h_data = cudf::test::to_host<Element>(col);
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
std::back_inserter(out),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? static_cast<std::string>(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(std::cbegin(h_data.first),
std::cend(h_data.first),
std::back_inserter(out),
[col](auto const& fp) { return static_cast<std::string>(fp); });
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
//
// Implementation for strings, call special to_host variant
//
if (col.is_empty()) return;
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? h_data.first[idx]
: std::string("NULL");
});
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
cudf::dictionary_column_view dictionary(col);
if (col.is_empty()) return;
std::vector<std::string> keys = to_strings(dictionary.keys());
std::vector<std::string> indices = to_strings({dictionary.indices().type(),
dictionary.size(),
dictionary.indices().head(),
dictionary.null_mask(),
dictionary.null_count(),
dictionary.offset()});
out.insert(out.end(), keys.begin(), keys.end());
if (!indices.empty()) {
std::string first = "\x08 : " + indices.front(); // use : as delimiter
out.push_back(first); // between keys and indices
out.insert(out.end(), indices.begin() + 1, indices.end());
}
}
// Print the tick counts with the units
template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx].count()) +
duration_suffix(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el.count()) + duration_suffix(el);
});
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
lists_column_view lcv(col);
// propagate slicing to the child if necessary
column_view child = lcv.get_sliced_child(rmm::cuda_stream_default);
bool const is_sliced = lcv.offset() > 0 || child.offset() > 0;
std::string tmp =
get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent +
"Length : " + std::to_string(lcv.size()) + "\n" + indent +
"Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" +
(lcv.parent().nullable()
? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" +
detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n"
: "") +
// non-nested types don't typically display their null masks, so do it here for convenience.
(!is_nested(child.type()) && child.nullable()
? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
(detail::to_string(child, ", ", indent + " ")) + "\n";
out.push_back(tmp);
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
structs_column_view view{col};
std::ostringstream out_stream;
out_stream << get_nested_type_str(col) << ":\n"
<< indent << "Length : " << view.size() << ":\n";
if (view.nullable()) {
out_stream << indent << "Null count: " << view.null_count() << "\n"
<< detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n";
}
auto iter = thrust::make_counting_iterator(0);
std::transform(
iter,
iter + view.num_children(),
std::ostream_iterator<std::string>(out_stream, "\n"),
[&](size_type index) {
auto child = view.get_sliced_child(index);
// non-nested types don't typically display their null masks, so do it here for convenience.
return (!is_nested(child.type()) && child.nullable()
? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
detail::to_string(child, ", ", indent + " ");
});
out.push_back(out_stream.str());
}
};
} // namespace
namespace detail {
/**
* @copydoc cudf::test::detail::to_strings
*/
std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent)
{
std::vector<std::string> reply;
cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent);
return reply;
}
/**
* @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string)
*
* @param indent Indentation for all output
*/
std::string to_string(cudf::column_view const& col,
std::string const& delimiter,
std::string const& indent)
{
std::ostringstream buffer;
std::vector<std::string> h_data = to_strings(col, indent);
buffer << indent;
std::copy(h_data.begin(),
h_data.end() - (!h_data.empty()),
std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
if (!h_data.empty()) buffer << h_data.back();
return buffer.str();
}
/**
* @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string)
*
* @param indent Indentation for all output. See comment in `to_strings` for
* a detailed description.
*/
std::string to_string(std::vector<bitmask_type> const& null_mask,
size_type null_mask_size,
std::string const& indent)
{
std::ostringstream buffer;
buffer << indent;
for (int idx = null_mask_size - 1; idx >= 0; idx--) {
buffer << (cudf::bit_is_set(null_mask.data(), idx) ? "1" : "0");
}
return buffer.str();
}
} // namespace detail
/**
* @copydoc cudf::test::to_strings
*/
std::vector<std::string> to_strings(cudf::column_view const& col)
{
return detail::to_strings(col);
}
/**
* @copydoc cudf::test::to_string(cudf::column_view, std::string)
*/
std::string to_string(cudf::column_view const& col, std::string const& delimiter)
{
return detail::to_string(col, delimiter);
}
/**
* @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type)
*/
std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size)
{
return detail::to_string(null_mask, null_mask_size);
}
/**
* @copydoc cudf::test::print
*/
void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter)
{
os << to_string(col, delimiter) << std::endl;
}
/**
* @copydoc cudf::test::validate_host_masks
*/
bool validate_host_masks(std::vector<bitmask_type> const& expected_mask,
std::vector<bitmask_type> const& got_mask,
size_type number_of_elements)
{
return std::all_of(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(number_of_elements),
[&expected_mask, &got_mask](auto index) {
return cudf::bit_is_set(expected_mask.data(), index) ==
cudf::bit_is_set(got_mask.data(), index);
});
}
} // namespace test
} // namespace cudf
|
e8803d8baa0b39349b2eda9c451873803f0a0071.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/detail/column_utilities.hpp>
#include <jit/type.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/logical.h>
#include <numeric>
#include <sstream>
#include "cudf/detail/utilities/vector_factories.hpp"
#include "rmm/cuda_stream_view.hpp"
namespace cudf {
namespace test {
namespace {
// expand all non-null rows in a list column into a column of child row indices.
std::unique_ptr<column> generate_child_row_indices(lists_column_view const& c,
column_view const& row_indices)
{
// Example input
// List<int32_t>:
// Length : 7
// Offsets : 0, 3, 6, 8, 11, 14, 16, 19
// | | <-- non-null input rows
// Null count: 5
// 0010100
// 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7
// | | | | | <-- child rows of non-null rows
//
// Desired output: [6, 7, 11, 12, 13]
// compute total # of child row indices we will be emitting.
auto row_size_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
validity = c.null_mask(),
offsets = c.offsets().begin<offset_type>(),
offset = c.offset()] __device__(int index) {
// both null mask and offsets data are not pre-sliced. so we need to add the column offset to
// every incoming index.
auto const true_index = row_indices[index] + offset;
return !validity || cudf::bit_is_set(validity, true_index)
? (offsets[true_index + 1] - offsets[true_index])
: 0;
});
auto const output_size =
thrust::reduce(rmm::exec_policy(), row_size_iter, row_size_iter + row_indices.size());
// no output. done.
auto result =
cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED);
if (output_size == 0) { return result; }
// for all input rows, what position in the output column they will start at.
//
// output_row_start = [0, 0, 0, 2, 2, 5, 5]
// | | <-- non-null input rows
//
auto output_row_start = cudf::make_fixed_width_column(
data_type{type_id::INT32}, row_indices.size(), mask_state::UNALLOCATED);
thrust::exclusive_scan(rmm::exec_policy(),
row_size_iter,
row_size_iter + row_indices.size(),
output_row_start->mutable_view().begin<size_type>());
// fill result column with 1s
//
// result = [1, 1, 1, 1, 1]
//
thrust::generate(rmm::exec_policy(),
result->mutable_view().begin<size_type>(),
result->mutable_view().end<size_type>(),
[] __device__() { return 1; });
// scatter the output row positions into result buffer
//
// result = [6, 1, 11, 1, 1]
//
auto validity_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
validity = c.null_mask(),
offset = c.offset()] __device__(int index) {
auto const true_index = row_indices[index] + offset;
return !validity || cudf::bit_is_set(validity, true_index) ? 1 : 0;
});
auto output_row_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
offsets = c.offsets().begin<offset_type>(),
offset = c.offset(),
first_offset = cudf::detail::get_value<offset_type>(
c.offsets(), c.offset(), rmm::cuda_stream_default)] __device__(int index) {
auto const true_index = row_indices[index] + offset;
return offsets[true_index] - first_offset;
});
thrust::scatter_if(rmm::exec_policy(),
output_row_iter,
output_row_iter + row_indices.size(),
output_row_start->view().begin<size_type>(),
validity_iter,
result->mutable_view().begin<size_type>());
// generate keys for each output row
//
// result = [1, 1, 2, 2, 2]
//
auto keys =
cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED);
thrust::generate(rmm::exec_policy(),
keys->mutable_view().begin<size_type>(),
keys->mutable_view().end<size_type>(),
[] __device__() { return 0; });
thrust::scatter_if(rmm::exec_policy(),
validity_iter,
validity_iter + row_indices.size(),
output_row_start->view().begin<size_type>(),
validity_iter,
keys->mutable_view().begin<size_type>());
thrust::inclusive_scan(rmm::exec_policy(),
keys->view().begin<size_type>(),
keys->view().end<size_type>(),
keys->mutable_view().begin<size_type>());
// scan by key to generate final child row indices.
// input
// result = [6, 1, 11, 1, 1]
// keys = [1, 1, 2, 2, 2]
//
// output
// result = [6, 7, 11, 12, 13]
//
thrust::inclusive_scan_by_key(rmm::exec_policy(),
keys->view().begin<size_type>(),
keys->view().end<size_type>(),
result->view().begin<size_type>(),
result->mutable_view().begin<size_type>());
return result;
}
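// Comparison helper for the property comparator below. Depending on `verbosity`
// it either silently reports a mismatch (QUIET), registers a GTest failure and
// stops at the first difference (FIRST_ERROR), or keeps comparing while marking
// the running `result` false.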
#define PROP_EXPECT_EQ(a, b) \
do { \
if (verbosity == debug_output_level::QUIET) { \
if (a != b) { return false; } \
} else { \
EXPECT_EQ(a, b); \
if (a != b) { \
if (verbosity == debug_output_level::FIRST_ERROR) { \
return false; \
} else { \
result = false; \
} \
} \
} \
} while (0)
template <bool check_exact_equality>
struct column_property_comparator {
bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs)
{
return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs;
}
size_type count_nulls(cudf::column_view const& c, cudf::column_view const& row_indices)
{
auto validity_iter = cudf::detail::make_counting_transform_iterator(
0,
[row_indices = row_indices.begin<size_type>(),
validity = c.null_mask(),
offset = c.offset()] __device__(int index) {
// both null mask and offsets data are not pre-sliced. so we need to add the column offset
// to every incoming index.
auto const true_index = row_indices[index] + offset;
return !validity || cudf::bit_is_set(validity, true_index) ? 0 : 1;
});
return thrust::reduce(rmm::exec_policy(), validity_iter, validity_iter + row_indices.size());
}
bool compare_common(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
bool result = true;
if (check_exact_equality) {
PROP_EXPECT_EQ(lhs.type(), rhs.type());
} else {
PROP_EXPECT_EQ(types_equivalent(lhs.type(), rhs.type()), true);
}
// DISCUSSION: does this make sense, semantically?
auto const lhs_size = check_exact_equality ? lhs.size() : lhs_row_indices.size();
auto const rhs_size = check_exact_equality ? rhs.size() : rhs_row_indices.size();
PROP_EXPECT_EQ(lhs_size, rhs_size);
if (lhs_size > 0 && check_exact_equality) { PROP_EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
// DISCUSSION: does this make sense, semantically?
auto const lhs_null_count =
check_exact_equality ? lhs.null_count() : count_nulls(lhs, lhs_row_indices);
auto const rhs_null_count =
check_exact_equality ? rhs.null_count() : count_nulls(rhs, rhs_row_indices);
PROP_EXPECT_EQ(lhs_null_count, rhs_null_count);
// equivalent, but not exactly equal columns can have a different number of children if their
// sizes are both 0. Specifically, empty string columns may or may not have children.
if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) {
PROP_EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
return result;
}
template <typename T,
std::enable_if_t<!std::is_same<T, cudf::list_view>::value &&
!std::is_same<T, cudf::struct_view>::value>* = nullptr>
bool operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
return compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity);
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr>
bool operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; }
cudf::lists_column_view lhs_l(lhs);
cudf::lists_column_view rhs_l(rhs);
// recurse
auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default);
// note: if a column is all nulls or otherwise empty, no indices are generated and no recursion
// happens
auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices);
if (lhs_child_indices->size() > 0) {
auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default);
auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices);
return cudf::type_dispatcher(lhs_child.type(),
column_property_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
*lhs_child_indices,
*rhs_child_indices,
verbosity);
}
return true;
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::struct_view>::value>* = nullptr>
bool operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& lhs_row_indices,
cudf::column_view const& rhs_row_indices,
debug_output_level verbosity)
{
if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; }
structs_column_view l_scv(lhs);
structs_column_view r_scv(rhs);
for (size_type i = 0; i < lhs.num_children(); i++) {
column_view lhs_child = l_scv.get_sliced_child(i);
column_view rhs_child = r_scv.get_sliced_child(i);
if (!cudf::type_dispatcher(lhs_child.type(),
column_property_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
lhs_row_indices,
rhs_row_indices,
verbosity)) {
return false;
}
}
return true;
}
};
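// Device functor used with thrust::copy_if further below: returns true when the
// rows selected by the lhs/rhs index columns compare as NOT equal under
// row_equality_comparator, i.e. it flags positions whose rows differ exactly.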
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs,
table_device_view d_rhs,
column_device_view lhs_row_indices_,
column_device_view rhs_row_indices_)
: comp(d_lhs, d_rhs), lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_)
{
}
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
return !comp(lhs_row_indices.element<size_type>(index),
rhs_row_indices.element<size_type>(index));
}
column_device_view lhs_row_indices;
column_device_view rhs_row_indices;
};
class corresponding_rows_not_equivalent {
table_device_view d_lhs;
table_device_view d_rhs;
column_device_view lhs_row_indices;
column_device_view rhs_row_indices;
public:
corresponding_rows_not_equivalent(table_device_view d_lhs,
table_device_view d_rhs,
column_device_view lhs_row_indices_,
column_device_view rhs_row_indices_)
: d_lhs(d_lhs),
d_rhs(d_rhs),
comp(d_lhs, d_rhs),
lhs_row_indices(lhs_row_indices_),
rhs_row_indices(rhs_row_indices_)
{
CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1,
"Unsupported number of columns");
}
struct typed_element_not_equivalent {
template <typename T>
__device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()(
column_device_view const& lhs,
column_device_view const& rhs,
size_type lhs_index,
size_type rhs_index)
{
if (lhs.is_valid(lhs_index) and rhs.is_valid(rhs_index)) {
T const x = lhs.element<T>(lhs_index);
T const y = rhs.element<T>(rhs_index);
// Must handle inf and nan separately
if (std::isinf(x) || std::isinf(y)) {
return x != y; // comparison of (inf==inf) returns true
} else if (std::isnan(x) || std::isnan(y)) {
return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false
} else {
constexpr int ulp = 4; // ulp = unit of least precision, value taken from google test
T const abs_x_minus_y = std::abs(x - y);
return abs_x_minus_y >= std::numeric_limits<T>::min() &&
abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp;
}
} else {
// if either is null, then the inequality was checked already
return true;
}
}
template <typename T, typename... Args>
__device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args...)
{
// Non-floating point inequality is checked already
return true;
}
};
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
auto const lhs_index = lhs_row_indices.element<size_type>(index);
auto const rhs_index = rhs_row_indices.element<size_type>(index);
if (not comp(lhs_index, rhs_index)) {
auto lhs_col = this->d_lhs.column(0);
auto rhs_col = this->d_rhs.column(0);
return type_dispatcher(
lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, lhs_index, rhs_index);
}
return false;
}
};
// Stringify the inconsistent values resulted from the comparison of two columns element-wise
std::string stringify_column_differences(cudf::device_span<int const> differences,
column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty");
std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : "";
// move the differences to the host.
auto h_differences = cudf::detail::make_host_vector_sync(differences);
if (verbosity == debug_output_level::ALL_ERRORS) {
std::ostringstream buffer;
buffer << depth_str << "differences:" << std::endl;
auto source_table = cudf::table_view({lhs, rhs});
auto diff_column =
fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end());
auto diff_table = cudf::gather(source_table, diff_column);
// Need to pull back the differences
auto const h_left_strings = to_strings(diff_table->get_column(0));
auto const h_right_strings = to_strings(diff_table->get_column(1));
for (size_t i = 0; i < h_differences.size(); ++i)
buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs["
<< h_differences[i] << "] = " << h_right_strings[i] << std::endl;
return buffer.str();
} else {
auto const index = h_differences[0]; // only stringify first difference
auto const lhs_index =
cudf::detail::get_value<size_type>(lhs_row_indices, index, rmm::cuda_stream_default);
auto const rhs_index =
cudf::detail::get_value<size_type>(rhs_row_indices, index, rmm::cuda_stream_default);
auto diff_lhs = cudf::detail::slice(lhs, lhs_index, lhs_index + 1);
auto diff_rhs = cudf::detail::slice(rhs, rhs_index, rhs_index + 1);
return depth_str + "first difference: " + "lhs[" + std::to_string(index) +
"] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) +
"] = " + to_string(diff_rhs, "");
}
}
// non-nested column types
template <typename T, bool check_exact_equality>
struct column_comparator_impl {
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
auto d_lhs_row_indices = cudf::column_device_view::create(lhs_row_indices);
auto d_rhs_row_indices = cudf::column_device_view::create(rhs_row_indices);
using ComparatorType = std::conditional_t<check_exact_equality,
corresponding_rows_unequal,
corresponding_rows_not_equivalent>;
auto differences = rmm::device_uvector<int>(
lhs.size(), rmm::cuda_stream_default); // worst case: everything different
auto input_iter = thrust::make_counting_iterator(0);
auto diff_iter =
thrust::copy_if(rmm::exec_policy(),
input_iter,
input_iter + lhs_row_indices.size(),
differences.begin(),
ComparatorType(*d_lhs, *d_rhs, *d_lhs_row_indices, *d_rhs_row_indices));
differences.resize(thrust::distance(differences.begin(), diff_iter),
rmm::cuda_stream_default); // shrink back down
if (not differences.is_empty()) {
if (verbosity != debug_output_level::QUIET) {
// GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda.
[&]() {
GTEST_FAIL() << stringify_column_differences(
differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth);
}();
}
return false;
}
return true;
}
};
// forward declaration for nested-type recursion.
template <bool check_exact_equality>
struct column_comparator;
// specialization for list columns
template <bool check_exact_equality>
struct column_comparator_impl<list_view, check_exact_equality> {
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
lists_column_view lhs_l(lhs);
lists_column_view rhs_l(rhs);
CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(), "List column size mismatch");
if (lhs_row_indices.is_empty()) { return true; }
// worst case - everything is different
rmm::device_uvector<int> differences(lhs_row_indices.size(), rmm::cuda_stream_default);
// compare offsets, taking slicing into account
// left side
size_type lhs_shift =
cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), rmm::cuda_stream_default);
auto lhs_offsets = thrust::make_transform_iterator(
lhs_l.offsets().begin<size_type>() + lhs_l.offset(),
[lhs_shift] __device__(size_type offset) { return offset - lhs_shift; });
auto lhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// right side
size_type rhs_shift =
cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), rmm::cuda_stream_default);
auto rhs_offsets = thrust::make_transform_iterator(
rhs_l.offsets().begin<size_type>() + rhs_l.offset(),
[rhs_shift] __device__(size_type offset) { return offset - rhs_shift; });
auto rhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// when checking for equivalency, we can't compare offset values directly, we can only
// compare lengths of the rows, and only if valid. as a concrete example, you could have two
// equivalent columns with the following data:
//
// column A
// offsets = [0, 3, 5, 7]
// validity = [0, 1, 1, 1]
//
// column B
// offsets = [0, 0, 2, 4]
// validity = [0, 1, 1, 1]
//
// Row 0 in column A happens to have a positive length, even though the row is null, but column
// B does not. So the offsets for the remaining valid rows are fundamentally different even
// though the row lengths are the same.
//
auto input_iter = thrust::make_counting_iterator(0);
auto diff_iter = thrust::copy_if(
rmm::exec_policy(),
input_iter,
input_iter + lhs_row_indices.size(),
differences.begin(),
[lhs_offsets,
rhs_offsets,
lhs_valids,
rhs_valids,
lhs_indices = lhs_row_indices.begin<size_type>(),
rhs_indices = rhs_row_indices.begin<size_type>()] __device__(size_type index) {
auto const lhs_index = lhs_indices[index];
auto const rhs_index = rhs_indices[index];
// check for validity match
if (lhs_valids[lhs_index] != rhs_valids[rhs_index]) { return true; }
// if the row is valid, check that the length of the list is the same. do this
// for both the equivalency and exact equality checks.
if (lhs_valids[lhs_index] && ((lhs_offsets[lhs_index + 1] - lhs_offsets[lhs_index]) !=
(rhs_offsets[rhs_index + 1] - rhs_offsets[rhs_index]))) {
return true;
}
// if validity matches -and- is false, we can ignore the actual offset values. this
// is technically not checking "equal()", but it's how the non-list code path handles it
if (!lhs_valids[lhs_index]) { return false; }
// if checking exact equality, compare the actual offset values
if (check_exact_equality && lhs_offsets[lhs_index] != rhs_offsets[rhs_index]) {
return true;
}
return false;
});
differences.resize(thrust::distance(differences.begin(), diff_iter),
rmm::cuda_stream_default); // shrink back down
if (not differences.is_empty()) {
if (verbosity != debug_output_level::QUIET) {
// GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda.
[&]() {
GTEST_FAIL() << stringify_column_differences(
differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth);
}();
}
return false;
}
// recurse.
auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default);
// note: if a column is all nulls or otherwise empty, no indices are generated and no recursion
// happens
auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices);
if (lhs_child_indices->size() > 0) {
auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default);
auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices);
return cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
*lhs_child_indices,
*rhs_child_indices,
verbosity,
depth + 1);
}
return true;
}
};
template <bool check_exact_equality>
struct column_comparator_impl<struct_view, check_exact_equality> {
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth)
{
structs_column_view l_scv(lhs);
structs_column_view r_scv(rhs);
for (size_type i = 0; i < lhs.num_children(); i++) {
column_view lhs_child = l_scv.get_sliced_child(i);
column_view rhs_child = r_scv.get_sliced_child(i);
if (!cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
lhs_row_indices,
rhs_row_indices,
verbosity,
depth + 1)) {
return false;
}
}
return true;
}
};
template <bool check_exact_equality>
struct column_comparator {
template <typename T>
bool operator()(column_view const& lhs,
column_view const& rhs,
column_view const& lhs_row_indices,
column_view const& rhs_row_indices,
debug_output_level verbosity,
int depth = 0)
{
CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(),
"Mismatch in row counts to compare");
// compare properties
if (!cudf::type_dispatcher(lhs.type(),
column_property_comparator<check_exact_equality>{},
lhs,
rhs,
lhs_row_indices,
rhs_row_indices,
verbosity)) {
return false;
}
// compare values
column_comparator_impl<T, check_exact_equality> comparator{};
return comparator(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth);
}
};
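// Build an index column [0, 1, ..., num_rows-1]; used as the row-index map when
// comparing whole (unsliced) columns.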
std::unique_ptr<column> generate_all_row_indices(size_type num_rows)
{
auto indices =
cudf::make_fixed_width_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED);
thrust::sequence(rmm::exec_policy(),
indices->mutable_view().begin<size_type>(),
indices->mutable_view().end<size_type>(),
0);
return indices;
}
} // namespace
/**
* @copydoc cudf::test::expect_column_properties_equal
*/
bool expect_column_properties_equal(column_view const& lhs,
column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_property_comparator<true>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_column_properties_equivalent
*/
bool expect_column_properties_equivalent(column_view const& lhs,
column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_property_comparator<false>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_columns_equal
*/
bool expect_columns_equal(cudf::column_view const& lhs,
cudf::column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_comparator<true>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_columns_equivalent
*/
bool expect_columns_equivalent(cudf::column_view const& lhs,
cudf::column_view const& rhs,
debug_output_level verbosity)
{
auto indices = generate_all_row_indices(lhs.size());
return cudf::type_dispatcher(
lhs.type(), column_comparator<false>{}, lhs, rhs, *indices, *indices, verbosity);
}
/**
* @copydoc cudf::test::expect_equal_buffers
*/
void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes)
{
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs));
}
/**
* @copydoc cudf::test::bitmask_to_host
*/
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c)
{
if (c.nullable()) {
auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
std::vector<bitmask_type> host_bitmask(num_bitmasks);
if (c.offset() == 0) {
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
c.null_mask(),
num_bitmasks * sizeof(bitmask_type),
cudaMemcpyDeviceToHost));
} else {
auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size());
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
mask.data(),
num_bitmasks * sizeof(bitmask_type),
cudaMemcpyDeviceToHost));
}
return host_bitmask;
} else {
return std::vector<bitmask_type>{};
}
}
namespace {
template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
return std::to_string(value);
}
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
std::ostringstream o;
o << std::setprecision(std::numeric_limits<T>::max_digits10) << value;
return o.str();
}
static auto duration_suffix(cudf::duration_D) { return " days"; }
static auto duration_suffix(cudf::duration_s) { return " seconds"; }
static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; }
static auto duration_suffix(cudf::duration_us) { return " microseconds"; }
static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; }
std::string get_nested_type_str(cudf::column_view const& view)
{
if (view.type().id() == cudf::type_id::LIST) {
lists_column_view lcv(view);
return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">";
}
if (view.type().id() == cudf::type_id::STRUCT) {
std::ostringstream out;
out << cudf::jit::get_type_name(view.type()) + "<";
std::transform(view.child_begin(),
view.child_end(),
std::ostream_iterator<std::string>(out, ","),
[&out](auto const col) { return get_nested_type_str(col); });
out << ">";
return out.str();
}
return cudf::jit::get_type_name(view.type());
}
template <typename NestedColumnView>
std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ")
{
column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index);
CUDF_EXPECTS(offsets.type().id() == type_id::INT32,
"Column does not appear to be an offsets column");
CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!");
size_type output_size = c.size() + 1;
// the first offset value to normalize everything against
size_type first =
cudf::detail::get_value<size_type>(offsets, c.offset(), rmm::cuda_stream_default);
rmm::device_uvector<size_type> shifted_offsets(output_size, rmm::cuda_stream_default);
// normalize the offset values for the column offset
size_type const* d_offsets = offsets.head<size_type>() + c.offset();
thrust::transform(
rmm::exec_policy(),
d_offsets,
d_offsets + output_size,
shifted_offsets.begin(),
[first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); });
auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets);
std::ostringstream buffer;
for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) {
buffer << h_shifted_offsets[idx];
if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; }
}
return buffer.str();
}
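// Type-dispatched functor that converts a column_view into per-row strings
// (or, for nested list/struct columns, a single multi-line description),
// used by to_strings()/to_string() further below.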
struct column_view_printer {
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el);
});
}
}
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
if (col_as_strings->size() == 0) { return; }
this->template operator()<cudf::string_view>(*col_as_strings, out, indent);
}
template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
auto const h_data = cudf::test::to_host<Element>(col);
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
std::back_inserter(out),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? static_cast<std::string>(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(std::cbegin(h_data.first),
std::cend(h_data.first),
std::back_inserter(out),
[col](auto const& fp) { return static_cast<std::string>(fp); });
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
//
// Implementation for strings, call special to_host variant
//
if (col.is_empty()) return;
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? h_data.first[idx]
: std::string("NULL");
});
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
cudf::dictionary_column_view dictionary(col);
if (col.is_empty()) return;
std::vector<std::string> keys = to_strings(dictionary.keys());
std::vector<std::string> indices = to_strings({dictionary.indices().type(),
dictionary.size(),
dictionary.indices().head(),
dictionary.null_mask(),
dictionary.null_count(),
dictionary.offset()});
out.insert(out.end(), keys.begin(), keys.end());
if (!indices.empty()) {
std::string first = "\x08 : " + indices.front(); // use : as delimiter
out.push_back(first); // between keys and indices
out.insert(out.end(), indices.begin() + 1, indices.end());
}
}
// Print the tick counts with the units
template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx].count()) +
duration_suffix(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el.count()) + duration_suffix(el);
});
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
lists_column_view lcv(col);
// propagate slicing to the child if necessary
column_view child = lcv.get_sliced_child(rmm::cuda_stream_default);
bool const is_sliced = lcv.offset() > 0 || child.offset() > 0;
std::string tmp =
get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent +
"Length : " + std::to_string(lcv.size()) + "\n" + indent +
"Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" +
(lcv.parent().nullable()
? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" +
detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n"
: "") +
// non-nested types don't typically display their null masks, so do it here for convenience.
(!is_nested(child.type()) && child.nullable()
? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
(detail::to_string(child, ", ", indent + " ")) + "\n";
out.push_back(tmp);
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
structs_column_view view{col};
std::ostringstream out_stream;
out_stream << get_nested_type_str(col) << ":\n"
<< indent << "Length : " << view.size() << ":\n";
if (view.nullable()) {
out_stream << indent << "Null count: " << view.null_count() << "\n"
<< detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n";
}
auto iter = thrust::make_counting_iterator(0);
std::transform(
iter,
iter + view.num_children(),
std::ostream_iterator<std::string>(out_stream, "\n"),
[&](size_type index) {
auto child = view.get_sliced_child(index);
// non-nested types don't typically display their null masks, so do it here for convenience.
return (!is_nested(child.type()) && child.nullable()
? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
detail::to_string(child, ", ", indent + " ");
});
out.push_back(out_stream.str());
}
};
} // namespace
namespace detail {
/**
* @copydoc cudf::test::detail::to_strings
*/
std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent)
{
std::vector<std::string> reply;
cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent);
return reply;
}
/**
* @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string)
*
* @param indent Indentation for all output
*/
std::string to_string(cudf::column_view const& col,
std::string const& delimiter,
std::string const& indent)
{
std::ostringstream buffer;
std::vector<std::string> h_data = to_strings(col, indent);
buffer << indent;
std::copy(h_data.begin(),
h_data.end() - (!h_data.empty()),
std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
if (!h_data.empty()) buffer << h_data.back();
return buffer.str();
}
/**
* @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string)
*
* @param indent Indentation for all output. See comment in `to_strings` for
* a detailed description.
*/
std::string to_string(std::vector<bitmask_type> const& null_mask,
size_type null_mask_size,
std::string const& indent)
{
std::ostringstream buffer;
buffer << indent;
for (int idx = null_mask_size - 1; idx >= 0; idx--) {
buffer << (cudf::bit_is_set(null_mask.data(), idx) ? "1" : "0");
}
return buffer.str();
}
} // namespace detail
/**
* @copydoc cudf::test::to_strings
*/
std::vector<std::string> to_strings(cudf::column_view const& col)
{
return detail::to_strings(col);
}
/**
* @copydoc cudf::test::to_string(cudf::column_view, std::string)
*/
std::string to_string(cudf::column_view const& col, std::string const& delimiter)
{
return detail::to_string(col, delimiter);
}
/**
* @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type)
*/
std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size)
{
return detail::to_string(null_mask, null_mask_size);
}
/**
* @copydoc cudf::test::print
*/
void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter)
{
os << to_string(col, delimiter) << std::endl;
}
/**
* @copydoc cudf::test::validate_host_masks
*/
bool validate_host_masks(std::vector<bitmask_type> const& expected_mask,
std::vector<bitmask_type> const& got_mask,
size_type number_of_elements)
{
return std::all_of(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(number_of_elements),
[&expected_mask, &got_mask](auto index) {
return cudf::bit_is_set(expected_mask.data(), index) ==
cudf::bit_is_set(got_mask.data(), index);
});
}
} // namespace test
} // namespace cudf
|
80c57390fb06ac5730fca4c32ec41d04878d599c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
#define FETCH_SIZE 4096
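// Block-wide dot product: every thread accumulates a strided partial sum of
// a[i]*b[i], and the partials are combined into *res with atomicAdd after
// thread 0 has zeroed it. Must be called by all threads of the block
// (it synchronizes).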
__device__ void inner_product_simple( float* a, float* b, int size,float* res)
{
float tmp=0;
__syncthreads();
if (threadIdx.x==0) {
*res=0;
}
__syncthreads();
int i=0;
for( i=threadIdx.x;i<size;i+=blockDim.x){
tmp+=(a[i]*b[i]);
}
atomicAdd((float*)res,tmp);
__syncthreads();
}
__device__ void prefetch(float* s_v, float* v,int size){
for(int i=threadIdx.x;i<size;i+=blockDim.x)
{
s_v[i]=v[i];
}
}
__shared__ float tmp_res_simple;
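// Each block handles a contiguous slice of per_block matrix entries: it walks
// its slice one row (v_size floats) at a time, computes the inner product with
// the vector, and thread 0 stores the result at o[out_offset + row].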
__global__ void bigmatrix_nofiles_simple(float* m, float *v, float* o,int out_offset, int m_size,int v_size)
{
int per_block=m_size/gridDim.x;
for(size_t out=per_block*blockIdx.x;out<(blockIdx.x+1)*per_block;out+=v_size){
inner_product_simple(v,m+out, v_size,&tmp_res_simple);
if(threadIdx.x==0)
{
(*(o+out_offset + (out/v_size)))=tmp_res_simple;
}
__syncthreads();
}
}
#include <sys/time.h>
double _timestamp(){
struct timeval tv;
gettimeofday(&tv,0);
return 1e6*tv.tv_sec+tv.tv_usec;
}
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "timer.h"
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <math.h>
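// Mean and standard deviation of the per-trial times and of the implied
// throughput (total_data / time); entries <= 0 (untimed warm-up runs) are skipped.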
void stdavg(double *avg_time, double *avg_thpt, double* std_time, double *std_thpt, const double* times, const double total_data, int arr_len)
{
*avg_time=*avg_thpt=*std_time=*std_thpt=0;
int counter=0;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*avg_time+=times[i];
*avg_thpt+=((double)total_data)/times[i];
counter++;
}
if (counter==0) return;
*avg_time/=(double)counter;
*avg_thpt/=(double)counter;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*std_time=(times[i]-*avg_time)*(times[i]-*avg_time);
double tmp=(((double)total_data)/times[i])-*avg_thpt;
*std_thpt=tmp*tmp;
}
*std_time/=(double)counter;
*std_thpt/=(double)counter;
*std_time=sqrt(*std_time);
*std_thpt=sqrt(*std_thpt);
}
void* open_map_file(const char* f, int* fd, size_t* size, int type, bool do_mmap=true)
{
int open_fd=open(f,type==O_RDONLY?type:type|O_CREAT|O_TRUNC,S_IRUSR|S_IWUSR);
if (open_fd<0){
perror("open failed");
return NULL;
}
if (type!=O_RDONLY) {
assert(*size>0);
if (ftruncate(open_fd,*size)){
perror("ftrunc failed");
return NULL;
}
}
struct stat s;
if (fstat(open_fd,&s)) {
fprintf(stderr,"Problem with fstat the file on CPU: %s \n ",strerror(errno));
}
if (s.st_size==0) {
fprintf(stderr,"file with zero lenght, skipping %s\n",f);
close(open_fd);
return NULL;
}
void* data=NULL;
if (do_mmap){
data=mmap(NULL,s.st_size,type==O_RDONLY?PROT_READ:PROT_READ|PROT_WRITE,MAP_POPULATE|(type==O_RDONLY?MAP_PRIVATE:MAP_SHARED),open_fd,0);
if (data==MAP_FAILED) {
perror("mmap");
close(open_fd);
return NULL;
}
}
*fd=open_fd;
*size=s.st_size;
return data;
}
#define CUDA_SAFE_CALL(x) if((x)!=hipSuccess) { fprintf(stderr,"CUDA ERROR %s: %d %s\n",__FILE__, __LINE__, hipGetErrorString(hipGetLastError())); exit(-1); }
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "timer.h"
//DEBUG
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
void unmap_close_file(int fd, void* ptr,size_t len, const char* what)
{
if(munmap(ptr,len)) { perror("unmap"); fprintf(stderr,"%s\n",what); };
close(fd);
}
#include <assert.h>
const int NUM_STREAMS=16;
// maximum number of timed trials recorded in times[]
#define MAX_TRIALS (10)
double times[MAX_TRIALS];
double total_data;
int main( int argc, char** argv)
{
if(argc<4) {
fprintf(stderr," <vector> <matrix> <output>\n\n");
return -1;
}
double total_time=0;
int nthreads=256;
int trials =3;
memset(times,0,sizeof(double)*MAX_TRIALS);
for(int t=-1;t<trials+1;t++){
int fd_m;
size_t size_m;
char* h_matrix=(char*)open_map_file(argv[2], &fd_m, &size_m, O_RDONLY,false);
posix_fadvise(fd_m,0,0,POSIX_FADV_WILLNEED);
// assert(h_matrix);
float* h_d_matrix[NUM_STREAMS];
float* d_matrix[NUM_STREAMS];
// size_t data_per_chunk=146800640/2;
size_t data_per_chunk=size_m/8;
assert(size_m%data_per_chunk==0);
printf("Data per chunk: %lu\n",data_per_chunk);
assert(data_per_chunk%sizeof(float)==0);
for (int i=0;i<NUM_STREAMS;i++){
// CUDA_SAFE_CALL(hipHostMalloc(&h_d_matrix[i], data_per_chunk, hipHostMallocDefault));
h_d_matrix[i] = (float*)malloc(data_per_chunk);
CUDA_SAFE_CALL(hipMalloc(&d_matrix[i],data_per_chunk));
}
double time_before=_timestamp();
if (t<=0) time_before=0;
int fd_v;
size_t size_v;
char* h_vector=(char*)open_map_file(argv[1],&fd_v,&size_v,O_RDONLY);
assert(h_vector);
float* d_vector;
CUDA_SAFE_CALL(hipMalloc(&d_vector,size_v));
int values_per_block=10;
int nblocks=(data_per_chunk/size_v/values_per_block);
assert(data_per_chunk/size_v/nblocks>0);
assert((data_per_chunk/size_v)%nblocks==0);
printf("Running with %d blocks, %d threads, %d vals per block\n",nblocks, nthreads,(data_per_chunk/size_v)/nblocks );
int fd_v_out;
size_t size_v_out=size_m/size_v*sizeof(float);
assert(size_v_out);
char* h_v_out=(char*)open_map_file(argv[3], &fd_v_out, &size_v_out, O_RDWR);
assert(h_v_out);
float* h_d_v_out;
float* d_v_out;
h_d_v_out = (float*)malloc(size_v_out);
CUDA_SAFE_CALL(hipMalloc(&d_v_out,size_v_out));
fprintf(stderr,"using: %s for matrix of size %lu, %s for vector of size %lu, %s for output of size %lu, data per chunk %lu\n",
argv[2], size_m,argv[1],size_v,argv[3],size_v_out,data_per_chunk);
hipStream_t s[NUM_STREAMS];
for(int i=0;i<NUM_STREAMS;i++){
CUDA_SAFE_CALL(hipStreamCreate(&s[i]));
}
int c=0;
CUDA_SAFE_CALL(hipMemcpy(d_vector,h_vector,size_v,hipMemcpyHostToDevice));
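// Stream the matrix through the GPU in data_per_chunk pieces, rotating over
// NUM_STREAMS streams: read a chunk with pread, copy it to the device
// asynchronously, launch the kernel, and copy that chunk's results back, so
// host I/O and the transfers/compute of earlier chunks can overlap.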
for(size_t i=0 ;i<size_m;i+=data_per_chunk)
{
fprintf(stderr,"chunk %lu %d\n",i, i/data_per_chunk);
size_t total_read=0;
CUDA_SAFE_CALL(hipStreamSynchronize(s[c]));
total_read=pread(fd_m, (char*)(h_d_matrix[c]), data_per_chunk, i);
CUDA_SAFE_CALL(hipMemcpyAsync((char*)(d_matrix[c]),h_d_matrix[c],
data_per_chunk,hipMemcpyHostToDevice,s[c]));
hipLaunchKernelGGL(( bigmatrix_nofiles_simple), dim3(nblocks),dim3(nthreads),0,s[c], d_matrix[c],d_vector,d_v_out,i/size_v,
data_per_chunk/(sizeof(float)), size_v/(sizeof(float)));
CUDA_SAFE_CALL(hipMemcpyAsync(h_d_v_out+i/size_v,d_v_out+i/size_v,data_per_chunk/size_v*sizeof(float),hipMemcpyDeviceToHost,s[c]));
c++;
c%=NUM_STREAMS;
}
hipError_t error = hipDeviceSynchronize();
//Check for errors and failed asserts in asynchronous kernel launch.
if(error != hipSuccess )
{
printf("Device failed, CUDA error message is: %s\n\n", hipGetErrorString(error));
}
memcpy(h_v_out,h_d_v_out,size_v_out);
hipFree(d_v_out);
hipFree(d_vector);
unmap_close_file(fd_v_out,h_v_out,size_v_out,"out");
close(fd_m);
unmap_close_file(fd_v,h_vector,size_v,"vector");
double time_after=_timestamp();
total_time+=(time_after-time_before);
if(t<=0) total_time=0;
if (t>0) times[t]=(time_after-time_before);
if (t>0) fprintf(stderr,"total time %.0f us, avg %.0f us, bw %.3f GB/s \n ", time_after-time_before, total_time/t, t*1.0e6*(size_v+size_m+size_v_out)/total_time/(1024.0*1024.0*1024.0));
total_data=(size_v+size_m+size_v_out);
free(h_d_v_out);
for (int i=0;i<NUM_STREAMS;i++){
free(h_d_matrix[i]);
CUDA_SAFE_CALL(hipFree(d_matrix[i]));
}
hipDeviceReset();
}
double avg_time, avg_thpt, std_time, std_thpt;
stdavg(&avg_time, &avg_thpt, &std_time, &std_thpt, times, total_data/(1024*1024*1024), MAX_TRIALS);
fprintf(stderr,"total time avg %.0f us +/- %.3f, avg_thpt %.3f GB/s +/- %.3f\n ", avg_time,std_time, 1e6*avg_thpt,1e6*std_thpt );
return 0;
}
|
80c57390fb06ac5730fca4c32ec41d04878d599c.cu
|
/*
* This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
#define FETCH_SIZE 4096
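// Block-wide dot product: every thread accumulates a strided partial sum of
// a[i]*b[i], and the partials are combined into *res with atomicAdd after
// thread 0 has zeroed it. Must be called by all threads of the block
// (it synchronizes).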
__device__ void inner_product_simple( float* a, float* b, int size,float* res)
{
float tmp=0;
__syncthreads();
if (threadIdx.x==0) {
*res=0;
}
__syncthreads();
int i=0;
for( i=threadIdx.x;i<size;i+=blockDim.x){
tmp+=(a[i]*b[i]);
}
atomicAdd((float*)res,tmp);
__syncthreads();
}
__device__ void prefetch(float* s_v, float* v,int size){
for(int i=threadIdx.x;i<size;i+=blockDim.x)
{
s_v[i]=v[i];
}
}
__shared__ float tmp_res_simple;
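// Each block handles a contiguous slice of per_block matrix entries: it walks
// its slice one row (v_size floats) at a time, computes the inner product with
// the vector, and thread 0 stores the result at o[out_offset + row].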
__global__ void bigmatrix_nofiles_simple(float* m, float *v, float* o,int out_offset, int m_size,int v_size)
{
int per_block=m_size/gridDim.x;
for(size_t out=per_block*blockIdx.x;out<(blockIdx.x+1)*per_block;out+=v_size){
inner_product_simple(v,m+out, v_size,&tmp_res_simple);
if(threadIdx.x==0)
{
(*(o+out_offset + (out/v_size)))=tmp_res_simple;
}
__syncthreads();
}
}
#include <sys/time.h>
double _timestamp(){
struct timeval tv;
gettimeofday(&tv,0);
return 1e6*tv.tv_sec+tv.tv_usec;
}
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "timer.h"
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <math.h>
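// Mean and standard deviation of the per-trial times and of the implied
// throughput (total_data / time); entries <= 0 (untimed warm-up runs) are skipped.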
void stdavg(double *avg_time, double *avg_thpt, double* std_time, double *std_thpt, const double* times, const double total_data, int arr_len)
{
*avg_time=*avg_thpt=*std_time=*std_thpt=0;
int counter=0;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*avg_time+=times[i];
*avg_thpt+=((double)total_data)/times[i];
counter++;
}
if (counter==0) return;
*avg_time/=(double)counter;
*avg_thpt/=(double)counter;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*std_time=(times[i]-*avg_time)*(times[i]-*avg_time);
double tmp=(((double)total_data)/times[i])-*avg_thpt;
*std_thpt=tmp*tmp;
}
*std_time/=(double)counter;
*std_thpt/=(double)counter;
*std_time=sqrt(*std_time);
*std_thpt=sqrt(*std_thpt);
}
void* open_map_file(const char* f, int* fd, size_t* size, int type, bool do_mmap=true)
{
int open_fd=open(f,type==O_RDONLY?type:type|O_CREAT|O_TRUNC,S_IRUSR|S_IWUSR);
if (open_fd<0){
perror("open failed");
return NULL;
}
if (type!=O_RDONLY) {
assert(*size>0);
if (ftruncate(open_fd,*size)){
perror("ftrunc failed");
return NULL;
}
}
struct stat s;
if (fstat(open_fd,&s)) {
fprintf(stderr,"Problem with fstat the file on CPU: %s \n ",strerror(errno));
}
if (s.st_size==0) {
fprintf(stderr,"file with zero lenght, skipping %s\n",f);
close(open_fd);
return NULL;
}
void* data=NULL;
if (do_mmap){
data=mmap(NULL,s.st_size,type==O_RDONLY?PROT_READ:PROT_READ|PROT_WRITE,MAP_POPULATE|(type==O_RDONLY?MAP_PRIVATE:MAP_SHARED),open_fd,0);
if (data==MAP_FAILED) {
perror("mmap");
close(open_fd);
return NULL;
}
}
*fd=open_fd;
*size=s.st_size;
return data;
}
#define CUDA_SAFE_CALL(x) if((x)!=cudaSuccess) { fprintf(stderr,"CUDA ERROR %s: %d %s\n",__FILE__, __LINE__, cudaGetErrorString(cudaGetLastError())); exit(-1); }
#include <cuda.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "timer.h"
//DEBUG
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
void unmap_close_file(int fd, void* ptr,size_t len, const char* what)
{
if(munmap(ptr,len)) { perror("unmap"); fprintf(stderr,"%s\n",what); };
close(fd);
}
#include <assert.h>
const int NUM_STREAMS=16;
// maximum number of timed trials recorded in times[]
#define MAX_TRIALS (10)
double times[MAX_TRIALS];
double total_data;
int main( int argc, char** argv)
{
if(argc<4) {
fprintf(stderr," <vector> <matrix> <output>\n\n");
return -1;
}
double total_time=0;
int nthreads=256;
int trials =3;
memset(times,0,sizeof(double)*MAX_TRIALS);
for(int t=-1;t<trials+1;t++){
int fd_m;
size_t size_m;
char* h_matrix=(char*)open_map_file(argv[2], &fd_m, &size_m, O_RDONLY,false);
posix_fadvise(fd_m,0,0,POSIX_FADV_WILLNEED);
// assert(h_matrix);
float* h_d_matrix[NUM_STREAMS];
float* d_matrix[NUM_STREAMS];
// size_t data_per_chunk=146800640/2;
size_t data_per_chunk=size_m/8;
assert(size_m%data_per_chunk==0);
printf("Data per chunk: %lu\n",data_per_chunk);
assert(data_per_chunk%sizeof(float)==0);
for (int i=0;i<NUM_STREAMS;i++){
// CUDA_SAFE_CALL(cudaHostAlloc(&h_d_matrix[i], data_per_chunk, cudaHostAllocDefault));
h_d_matrix[i] = (float*)malloc(data_per_chunk);
CUDA_SAFE_CALL(cudaMalloc(&d_matrix[i],data_per_chunk));
}
double time_before=_timestamp();
if (t<=0) time_before=0;
int fd_v;
size_t size_v;
char* h_vector=(char*)open_map_file(argv[1],&fd_v,&size_v,O_RDONLY);
assert(h_vector);
float* d_vector;
CUDA_SAFE_CALL(cudaMalloc(&d_vector,size_v));
int values_per_block=10;
int nblocks=(data_per_chunk/size_v/values_per_block);
assert(data_per_chunk/size_v/nblocks>0);
assert((data_per_chunk/size_v)%nblocks==0);
printf("Running with %d blocks, %d threads, %d vals per block\n",nblocks, nthreads,(data_per_chunk/size_v)/nblocks );
int fd_v_out;
size_t size_v_out=size_m/size_v*sizeof(float);
assert(size_v_out);
char* h_v_out=(char*)open_map_file(argv[3], &fd_v_out, &size_v_out, O_RDWR);
assert(h_v_out);
float* h_d_v_out;
float* d_v_out;
h_d_v_out = (float*)malloc(size_v_out);
CUDA_SAFE_CALL(cudaMalloc(&d_v_out,size_v_out));
fprintf(stderr,"using: %s for matrix of size %lu, %s for vector of size %lu, %s for output of size %lu, data per chunk %lu\n",
argv[2], size_m,argv[1],size_v,argv[3],size_v_out,data_per_chunk);
cudaStream_t s[NUM_STREAMS];
for(int i=0;i<NUM_STREAMS;i++){
CUDA_SAFE_CALL(cudaStreamCreate(&s[i]));
}
int c=0;
CUDA_SAFE_CALL(cudaMemcpy(d_vector,h_vector,size_v,cudaMemcpyHostToDevice));
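/* Chunked pipeline over NUM_STREAMS streams: each chunk of the matrix is read
   with pread() into a per-stream host staging buffer, copied to the device and
   multiplied on that stream, and its slice of the result is copied back
   asynchronously. The round-robin over c is intended to overlap file I/O,
   transfers and compute across chunks (note the staging buffers are plain
   malloc'ed, so the async copies may fall back to synchronous behaviour). */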
for(size_t i=0 ;i<size_m;i+=data_per_chunk)
{
fprintf(stderr,"chunk %lu %d\n",i, i/data_per_chunk);
size_t total_read=0;
CUDA_SAFE_CALL(cudaStreamSynchronize(s[c]));
total_read=pread(fd_m, (char*)(h_d_matrix[c]), data_per_chunk, i);
CUDA_SAFE_CALL(cudaMemcpyAsync((char*)(d_matrix[c]),h_d_matrix[c],
data_per_chunk,cudaMemcpyHostToDevice,s[c]));
bigmatrix_nofiles_simple<<<nblocks,nthreads,0,s[c]>>>(d_matrix[c],d_vector,d_v_out,i/size_v,
data_per_chunk/(sizeof(float)), size_v/(sizeof(float)));
CUDA_SAFE_CALL(cudaMemcpyAsync(h_d_v_out+i/size_v,d_v_out+i/size_v,data_per_chunk/size_v*sizeof(float),cudaMemcpyDeviceToHost,s[c]));
c++;
c%=NUM_STREAMS;
}
cudaError_t error = cudaDeviceSynchronize();
//Check for errors and failed asserts in asynchronous kernel launch.
if(error != cudaSuccess )
{
printf("Device failed, CUDA error message is: %s\n\n", cudaGetErrorString(error));
}
memcpy(h_v_out,h_d_v_out,size_v_out);
cudaFree(d_v_out);
cudaFree(d_vector);
unmap_close_file(fd_v_out,h_v_out,size_v_out,"out");
close(fd_m);
unmap_close_file(fd_v,h_vector,size_v,"vector");
double time_after=_timestamp();
total_time+=(time_after-time_before);
if(t<=0) total_time=0;
if (t>0) times[t]=(time_after-time_before);
if (t>0) fprintf(stderr,"total time %.0f us, avg %.0f us, bw %.3f GB/s \n ", time_after-time_before, total_time/t, t*1.0e6*(size_v+size_m+size_v_out)/total_time/(1024.0*1024.0*1024.0));
total_data=(size_v+size_m+size_v_out);
free(h_d_v_out);
for (int i=0;i<NUM_STREAMS;i++){
free(h_d_matrix[i]);
CUDA_SAFE_CALL(cudaFree(d_matrix[i]));
}
cudaDeviceReset();
}
double avg_time, avg_thpt, std_time, std_thpt;
stdavg(&avg_time, &avg_thpt, &std_time, &std_thpt, times, total_data/(1024*1024*1024), MAX_TRIALS);
fprintf(stderr,"total time avg %.0f us +/- %.3f, avg_thpt %.3f GB/s +/- %.3f\n ", avg_time,std_time, 1e6*avg_thpt,1e6*std_thpt );
return 0;
}
|
a256062ba65f15212d03c259df42ae136dd78faf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
template<bool betazero>
__global__ void
zmgeelltmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
extern __shared__ magmaDoubleComplex dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ) {
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_rows * n + row ];
magmaDoubleComplex val = dval [ num_rows * n + row ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
for( int i=0; i<num_vecs; i++ ) {
if (betazero) {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
} else {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i*num_cols ];
}
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs    magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaDoubleComplex ); // num_vecs vectors
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
}
return MAGMA_SUCCESS;
}
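/* Illustrative call sketch (not part of the original source, untested): assuming
   the ELL arrays dval/dcolind and the vectors dx/dy already reside on the device
   and a MAGMA queue has been created, y := A*x for num_vecs right-hand sides is

       magma_zmgeelltmv( MagmaNoTrans, m, n, num_vecs, nnz_per_row,
                         MAGMA_Z_ONE, dval, dcolind, dx,
                         MAGMA_Z_ZERO, dy, queue );

   Note the ELL layout assumed by the kernel above: slot n of row r is stored at
   dval[num_rows*n + r] / dcolind[num_rows*n + r]. */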
|
a256062ba65f15212d03c259df42ae136dd78faf.cu
|
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
template<bool betazero>
__global__ void
zmgeelltmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
extern __shared__ magmaDoubleComplex dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ) {
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_rows * n + row ];
magmaDoubleComplex val = dval [ num_rows * n + row ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
for( int i=0; i<num_vecs; i++ ) {
if (betazero) {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
} else {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i*num_cols ];
}
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs    magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaDoubleComplex ); // num_vecs vectors
if (beta == MAGMA_Z_ZERO) {
zmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
} else {
zmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
}
return MAGMA_SUCCESS;
}
|
21423e3552f053a1b87097a493815f69be65b6fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vecmul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((vecmul), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vecmul), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vecmul), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
21423e3552f053a1b87097a493815f69be65b6fe.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vecmul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vecmul<<<gridBlock,threadBlock>>>(A,B,C,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vecmul<<<gridBlock,threadBlock>>>(A,B,C,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vecmul<<<gridBlock,threadBlock>>>(A,B,C,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f11a43b1818a3da597c2af726e7cebd02b003078.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include "lbm_solver_gpu.h"
#include "lbm_model.h"
#include "lbm_model_gpu.cuh"
#include "utils.h"
#include "utils_gpu.h"
#include "cell_computation_gpu.cuh"
__constant__ float tau_d, wall_velocity_d[D_LBM];
__constant__ int xlength_d, num_cells_d;
__device__ float *stream_field_d, *collide_field_d;
/**
* Computes the post-collision distribution functions according to the BGK update rule and
* stores the results again at the same position.
*/
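/* Equivalent per-direction update, stated once for reference:
 *     f_i <- f_i - (f_i - f_i^eq) / tau_d ,   i = 0 .. Q_LBM-1
 * i.e. the standard single-relaxation-time BGK rule, fully unrolled below over
 * the 19 directions of the D3Q19 lattice. */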
__device__ void ComputePostCollisionDistributionsGpu(float *current_cell, float *feq){
current_cell[0]=current_cell[0]-(current_cell[0]-feq[0])/tau_d;
current_cell[1]=current_cell[1]-(current_cell[1]-feq[1])/tau_d;
current_cell[2]=current_cell[2]-(current_cell[2]-feq[2])/tau_d;
current_cell[3]=current_cell[3]-(current_cell[3]-feq[3])/tau_d;
current_cell[4]=current_cell[4]-(current_cell[4]-feq[4])/tau_d;
current_cell[5]=current_cell[5]-(current_cell[5]-feq[5])/tau_d;
current_cell[6]=current_cell[6]-(current_cell[6]-feq[6])/tau_d;
current_cell[7]=current_cell[7]-(current_cell[7]-feq[7])/tau_d;
current_cell[8]=current_cell[8]-(current_cell[8]-feq[8])/tau_d;
current_cell[9]=current_cell[9]-(current_cell[9]-feq[9])/tau_d;
current_cell[10]=current_cell[10]-(current_cell[10]-feq[10])/tau_d;
current_cell[11]=current_cell[11]-(current_cell[11]-feq[11])/tau_d;
current_cell[12]=current_cell[12]-(current_cell[12]-feq[12])/tau_d;
current_cell[13]=current_cell[13]-(current_cell[13]-feq[13])/tau_d;
current_cell[14]=current_cell[14]-(current_cell[14]-feq[14])/tau_d;
current_cell[15]=current_cell[15]-(current_cell[15]-feq[15])/tau_d;
current_cell[16]=current_cell[16]-(current_cell[16]-feq[16])/tau_d;
current_cell[17]=current_cell[17]-(current_cell[17]-feq[17])/tau_d;
current_cell[18]=current_cell[18]-(current_cell[18]-feq[18])/tau_d;
}
/*
* Performs streaming on cells.
*/
__global__ void DoStreaming(){
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int step=xlength_d+2, idx=x+y*step+z*step*step, nx, ny, nz;
/* check that indices are within the bounds since there could be more threads than needed */
if (0<x && x<(step-1) && 0<y && y<(step-1) && 0<z && z<(step-1)){
nx=x-LATTICE_VELOCITIES_D[0][0];
ny=y-LATTICE_VELOCITIES_D[0][1];
nz=z-LATTICE_VELOCITIES_D[0][2];
stream_field_d[Q_LBM*idx]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)];
nx=x-LATTICE_VELOCITIES_D[1][0];
ny=y-LATTICE_VELOCITIES_D[1][1];
nz=z-LATTICE_VELOCITIES_D[1][2];
stream_field_d[Q_LBM*idx+1]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+1];
nx=x-LATTICE_VELOCITIES_D[2][0];
ny=y-LATTICE_VELOCITIES_D[2][1];
nz=z-LATTICE_VELOCITIES_D[2][2];
stream_field_d[Q_LBM*idx+2]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+2];
nx=x-LATTICE_VELOCITIES_D[3][0];
ny=y-LATTICE_VELOCITIES_D[3][1];
nz=z-LATTICE_VELOCITIES_D[3][2];
stream_field_d[Q_LBM*idx+3]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+3];
nx=x-LATTICE_VELOCITIES_D[4][0];
ny=y-LATTICE_VELOCITIES_D[4][1];
nz=z-LATTICE_VELOCITIES_D[4][2];
stream_field_d[Q_LBM*idx+4]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+4];
nx=x-LATTICE_VELOCITIES_D[5][0];
ny=y-LATTICE_VELOCITIES_D[5][1];
nz=z-LATTICE_VELOCITIES_D[5][2];
stream_field_d[Q_LBM*idx+5]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+5];
nx=x-LATTICE_VELOCITIES_D[6][0];
ny=y-LATTICE_VELOCITIES_D[6][1];
nz=z-LATTICE_VELOCITIES_D[6][2];
stream_field_d[Q_LBM*idx+6]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+6];
nx=x-LATTICE_VELOCITIES_D[7][0];
ny=y-LATTICE_VELOCITIES_D[7][1];
nz=z-LATTICE_VELOCITIES_D[7][2];
stream_field_d[Q_LBM*idx+7]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+7];
nx=x-LATTICE_VELOCITIES_D[8][0];
ny=y-LATTICE_VELOCITIES_D[8][1];
nz=z-LATTICE_VELOCITIES_D[8][2];
stream_field_d[Q_LBM*idx+8]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+8];
nx=x-LATTICE_VELOCITIES_D[9][0];
ny=y-LATTICE_VELOCITIES_D[9][1];
nz=z-LATTICE_VELOCITIES_D[9][2];
stream_field_d[Q_LBM*idx+9]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+9];
nx=x-LATTICE_VELOCITIES_D[10][0];
ny=y-LATTICE_VELOCITIES_D[10][1];
nz=z-LATTICE_VELOCITIES_D[10][2];
stream_field_d[Q_LBM*idx+10]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+10];
nx=x-LATTICE_VELOCITIES_D[11][0];
ny=y-LATTICE_VELOCITIES_D[11][1];
nz=z-LATTICE_VELOCITIES_D[11][2];
stream_field_d[Q_LBM*idx+11]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+11];
nx=x-LATTICE_VELOCITIES_D[12][0];
ny=y-LATTICE_VELOCITIES_D[12][1];
nz=z-LATTICE_VELOCITIES_D[12][2];
stream_field_d[Q_LBM*idx+12]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+12];
nx=x-LATTICE_VELOCITIES_D[13][0];
ny=y-LATTICE_VELOCITIES_D[13][1];
nz=z-LATTICE_VELOCITIES_D[13][2];
stream_field_d[Q_LBM*idx+13]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+13];
nx=x-LATTICE_VELOCITIES_D[14][0];
ny=y-LATTICE_VELOCITIES_D[14][1];
nz=z-LATTICE_VELOCITIES_D[14][2];
stream_field_d[Q_LBM*idx+14]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+14];
nx=x-LATTICE_VELOCITIES_D[15][0];
ny=y-LATTICE_VELOCITIES_D[15][1];
nz=z-LATTICE_VELOCITIES_D[15][2];
stream_field_d[Q_LBM*idx+15]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+15];
nx=x-LATTICE_VELOCITIES_D[16][0];
ny=y-LATTICE_VELOCITIES_D[16][1];
nz=z-LATTICE_VELOCITIES_D[16][2];
stream_field_d[Q_LBM*idx+16]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+16];
nx=x-LATTICE_VELOCITIES_D[17][0];
ny=y-LATTICE_VELOCITIES_D[17][1];
nz=z-LATTICE_VELOCITIES_D[17][2];
stream_field_d[Q_LBM*idx+17]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+17];
nx=x-LATTICE_VELOCITIES_D[18][0];
ny=y-LATTICE_VELOCITIES_D[18][1];
nz=z-LATTICE_VELOCITIES_D[18][2];
stream_field_d[Q_LBM*idx+18]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+18];
}
}
/*
* Performs collision computation.
*/
__global__ void DoCollision(){
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int step=xlength_d+2, idx=x+y*step+z*step*step;
float density, velocity[D_LBM], feq[Q_LBM], *current_cell_s;
/* check that indices are within the bounds since there could be more threads than needed */
if (0<x && x<(step-1) && 0<y && y<(step-1) && 0<z && z<(step-1)){
current_cell_s=&collide_field_d[Q_LBM*idx];
ComputeDensityGpu(current_cell_s,&density);
ComputeVelocityGpu(current_cell_s,&density,velocity);
ComputeFeqGpu(&density,velocity,feq);
ComputePostCollisionDistributionsGpu(current_cell_s,feq);
}
}
/*
* Computes proper boundary values.
*/
__global__ void TreatBoundary(int *flag_field_d){
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int step=xlength_d+2, idx=x+y*step+z*step*step, nx, ny, nz, i, boundary_side=0, boundary_idx=100500;
float density, dot_prod;
if(idx<num_cells_d) {
if(flag_field_d[idx] == BOTTOM_BOUNDARY) {
boundary_side = BOTTOM_BOUNDARY;
boundary_idx = BOTTOM_BOUNDARY_IDX;
} else if (flag_field_d[idx] == LEFT_BOUNDARY) {
boundary_side = LEFT_BOUNDARY;
boundary_idx = LEFT_BOUNDARY_IDX;
} else if (flag_field_d[idx] == RIGHT_BOUNDARY) {
boundary_side = RIGHT_BOUNDARY;
boundary_idx = RIGHT_BOUNDARY_IDX;
} else if (flag_field_d[idx] == BACK_BOUNDARY) {
boundary_side = BACK_BOUNDARY;
boundary_idx = BACK_BOUNDARY_IDX;
} else if (flag_field_d[idx] == FRONT_BOUNDARY) {
boundary_side = FRONT_BOUNDARY;
boundary_idx = FRONT_BOUNDARY_IDX;
} else if (flag_field_d[idx] == LEFT_BOTTOM_EDGE) {
boundary_side = LEFT_BOTTOM_EDGE;
boundary_idx = 13;
} else if (flag_field_d[idx] == RIGHT_BOTTOM_EDGE) {
boundary_side = RIGHT_BOTTOM_EDGE;
boundary_idx = 11;
} else if (flag_field_d[idx] == BACK_BOTTOM_EDGE) {
boundary_side = BACK_BOTTOM_EDGE;
boundary_idx = 18;
} else if (flag_field_d[idx] == FRONT_BOTTOM_EDGE) {
boundary_side = FRONT_BOTTOM_EDGE;
boundary_idx = 4;
} else if (flag_field_d[idx] == LEFT_BACK_EDGE) {
boundary_side = LEFT_BACK_EDGE;
boundary_idx = 17;
} else if (flag_field_d[idx] == LEFT_FRONT_EDGE) {
boundary_side = LEFT_FRONT_EDGE;
boundary_idx = 3;
} else if (flag_field_d[idx] == RIGHT_BACK_EDGE) {
boundary_side = RIGHT_BACK_EDGE;
boundary_idx = 15;
} else if (flag_field_d[idx] == RIGHT_FRONT_EDGE) {
boundary_side = RIGHT_FRONT_EDGE;
boundary_idx = 1;
} else if (flag_field_d[idx] == LEFT_UPPER_EDGE) {
boundary_side = LEFT_UPPER_EDGE;
boundary_idx = 7;
} else if (flag_field_d[idx] == RIGHT_UPPER_EDGE) {
boundary_side = RIGHT_UPPER_EDGE;
boundary_idx = 5;
} else if (flag_field_d[idx] == BACK_UPPER_EDGE) {
boundary_side = BACK_UPPER_EDGE;
boundary_idx = 14;
} else if (flag_field_d[idx] == FRONT_UPPER_EDGE) {
boundary_side = FRONT_UPPER_EDGE;
boundary_idx = 0;
} else if (flag_field_d[idx] == TOP_BOUNDARY) {
boundary_side = TOP_BOUNDARY;
boundary_idx = TOP_BOUNDARY_IDX;
}
if( boundary_side==LEFT_BOUNDARY || boundary_side==RIGHT_BOUNDARY ||
boundary_side==BOTTOM_BOUNDARY ||
boundary_side==BACK_BOUNDARY || boundary_side==FRONT_BOUNDARY) {
i = TREAT_BOUNDARY_INDECES[boundary_idx][0];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][1];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][2];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][3];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][4];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
} else if (boundary_side == LEFT_BOTTOM_EDGE || boundary_side == RIGHT_BOTTOM_EDGE ||
boundary_side == BACK_BOTTOM_EDGE || boundary_side == FRONT_BOTTOM_EDGE ||
boundary_side == LEFT_BACK_EDGE || boundary_side == LEFT_FRONT_EDGE ||
boundary_side == RIGHT_BACK_EDGE || boundary_side == RIGHT_FRONT_EDGE) {
nx=x+LATTICE_VELOCITIES_D[boundary_idx][0];
ny=y+LATTICE_VELOCITIES_D[boundary_idx][1];
nz=z+LATTICE_VELOCITIES_D[boundary_idx][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+boundary_idx]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(boundary_idx)];
} else if(boundary_side == LEFT_UPPER_EDGE || boundary_side == RIGHT_UPPER_EDGE ||
boundary_side == BACK_UPPER_EDGE || boundary_side == FRONT_UPPER_EDGE) {
i = boundary_idx;
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
/* Compute density in the neighbour cell */
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
/* Compute dot product */
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
/* Assign the boundary cell value */
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
} else if(boundary_side == TOP_BOUNDARY) {
i = TREAT_BOUNDARY_INDECES[boundary_idx][0];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][1];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][2];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][3];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][4];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
}
}
}
/*
* Performs pointer swap on GPU.
*/
__global__ void DoSwap(){
float *swap=collide_field_d; collide_field_d=stream_field_d; stream_field_d=swap;
}
void DoIteration(float *collide_field, float *stream_field, int *flag_field, float tau,
float *wall_velocity, int xlength, float **collide_field_dd, float **stream_field_dd,
int **flag_field_d, float *mlups_sum){
int num_cells = pow(xlength+2, D_LBM);
clock_t mlups_time;
/* initialize constant data */
cudaErrorCheck(hipMemcpyToSymbol(xlength_d, &xlength, sizeof(int), 0, hipMemcpyHostToDevice));
cudaErrorCheck(hipMemcpyToSymbol(num_cells_d, &num_cells, sizeof(int), 0, hipMemcpyHostToDevice));
cudaErrorCheck(hipMemcpyToSymbol(tau_d, &tau, sizeof(float), 0, hipMemcpyHostToDevice));
cudaErrorCheck(hipMemcpyToSymbol(wall_velocity_d, wall_velocity, D_LBM*sizeof(float), 0, hipMemcpyHostToDevice));
cudaErrorCheck(hipMemcpyToSymbol(collide_field_d, collide_field_dd, sizeof(*collide_field_dd), 0, hipMemcpyHostToDevice));
cudaErrorCheck(hipMemcpyToSymbol(stream_field_d, stream_field_dd, sizeof(*stream_field_dd), 0, hipMemcpyHostToDevice));
/* define grid structure */
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((xlength+2+block.x-1)/block.x, (xlength+2+block.y-1)/block.y, (xlength+2+block.z-1)/block.z);
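/* One thread per cell of the padded (xlength+2)^3 domain; the ceiling division
   may launch more threads than cells, which is why DoStreaming/DoCollision and
   TreatBoundary re-check their indices against the domain bounds. */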
mlups_time = clock();
/* perform streaming */
hipLaunchKernelGGL(( DoStreaming), dim3(grid),dim3(block), 0, 0, );
cudaErrorCheck(hipDeviceSynchronize());
cudaErrorCheck(hipPeekAtLastError());
/* Perform the swapping of collide and stream fields */
hipLaunchKernelGGL(( DoSwap), dim3(1),dim3(1), 0, 0, );
cudaErrorCheck(hipDeviceSynchronize());
cudaErrorCheck(hipPeekAtLastError());
/* perform collision */
hipLaunchKernelGGL(( DoCollision), dim3(grid),dim3(block), 0, 0, );
cudaErrorCheck(hipDeviceSynchronize());
cudaErrorCheck(hipPeekAtLastError());
/* perform boundary treatment */
hipLaunchKernelGGL(( TreatBoundary), dim3(grid),dim3(block), 0, 0, *flag_field_d);
cudaErrorCheck(hipPeekAtLastError());
mlups_time = clock()-mlups_time;
*mlups_sum += num_cells/(MLUPS_EXPONENT*(float)mlups_time/CLOCKS_PER_SEC);
if(VERBOSE)
printf("MLUPS: %f\n", num_cells/(MLUPS_EXPONENT*(float)mlups_time/CLOCKS_PER_SEC));
/* copy data back to host */
cudaErrorCheck(hipMemcpyFromSymbol(collide_field_dd, collide_field_d, sizeof(*collide_field_dd), 0, hipMemcpyDeviceToHost));
cudaErrorCheck(hipMemcpyFromSymbol(stream_field_dd, stream_field_d, sizeof(*stream_field_dd), 0, hipMemcpyDeviceToHost));
}
|
f11a43b1818a3da597c2af726e7cebd02b003078.cu
|
#include <math.h>
#include <stdio.h>
#include "lbm_solver_gpu.h"
#include "lbm_model.h"
#include "lbm_model_gpu.cuh"
#include "utils.h"
#include "utils_gpu.h"
#include "cell_computation_gpu.cuh"
__constant__ float tau_d, wall_velocity_d[D_LBM];
__constant__ int xlength_d, num_cells_d;
__device__ float *stream_field_d, *collide_field_d;
/**
* Computes the post-collision distribution functions according to the BGK update rule and
* stores the results again at the same position.
*/
__device__ void ComputePostCollisionDistributionsGpu(float *current_cell, float *feq){
current_cell[0]=current_cell[0]-(current_cell[0]-feq[0])/tau_d;
current_cell[1]=current_cell[1]-(current_cell[1]-feq[1])/tau_d;
current_cell[2]=current_cell[2]-(current_cell[2]-feq[2])/tau_d;
current_cell[3]=current_cell[3]-(current_cell[3]-feq[3])/tau_d;
current_cell[4]=current_cell[4]-(current_cell[4]-feq[4])/tau_d;
current_cell[5]=current_cell[5]-(current_cell[5]-feq[5])/tau_d;
current_cell[6]=current_cell[6]-(current_cell[6]-feq[6])/tau_d;
current_cell[7]=current_cell[7]-(current_cell[7]-feq[7])/tau_d;
current_cell[8]=current_cell[8]-(current_cell[8]-feq[8])/tau_d;
current_cell[9]=current_cell[9]-(current_cell[9]-feq[9])/tau_d;
current_cell[10]=current_cell[10]-(current_cell[10]-feq[10])/tau_d;
current_cell[11]=current_cell[11]-(current_cell[11]-feq[11])/tau_d;
current_cell[12]=current_cell[12]-(current_cell[12]-feq[12])/tau_d;
current_cell[13]=current_cell[13]-(current_cell[13]-feq[13])/tau_d;
current_cell[14]=current_cell[14]-(current_cell[14]-feq[14])/tau_d;
current_cell[15]=current_cell[15]-(current_cell[15]-feq[15])/tau_d;
current_cell[16]=current_cell[16]-(current_cell[16]-feq[16])/tau_d;
current_cell[17]=current_cell[17]-(current_cell[17]-feq[17])/tau_d;
current_cell[18]=current_cell[18]-(current_cell[18]-feq[18])/tau_d;
}
/*
* Performs streaming on cells.
*/
__global__ void DoStreaming(){
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int step=xlength_d+2, idx=x+y*step+z*step*step, nx, ny, nz;
/* check that indices are within the bounds since there could be more threads than needed */
if (0<x && x<(step-1) && 0<y && y<(step-1) && 0<z && z<(step-1)){
nx=x-LATTICE_VELOCITIES_D[0][0];
ny=y-LATTICE_VELOCITIES_D[0][1];
nz=z-LATTICE_VELOCITIES_D[0][2];
stream_field_d[Q_LBM*idx]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)];
nx=x-LATTICE_VELOCITIES_D[1][0];
ny=y-LATTICE_VELOCITIES_D[1][1];
nz=z-LATTICE_VELOCITIES_D[1][2];
stream_field_d[Q_LBM*idx+1]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+1];
nx=x-LATTICE_VELOCITIES_D[2][0];
ny=y-LATTICE_VELOCITIES_D[2][1];
nz=z-LATTICE_VELOCITIES_D[2][2];
stream_field_d[Q_LBM*idx+2]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+2];
nx=x-LATTICE_VELOCITIES_D[3][0];
ny=y-LATTICE_VELOCITIES_D[3][1];
nz=z-LATTICE_VELOCITIES_D[3][2];
stream_field_d[Q_LBM*idx+3]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+3];
nx=x-LATTICE_VELOCITIES_D[4][0];
ny=y-LATTICE_VELOCITIES_D[4][1];
nz=z-LATTICE_VELOCITIES_D[4][2];
stream_field_d[Q_LBM*idx+4]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+4];
nx=x-LATTICE_VELOCITIES_D[5][0];
ny=y-LATTICE_VELOCITIES_D[5][1];
nz=z-LATTICE_VELOCITIES_D[5][2];
stream_field_d[Q_LBM*idx+5]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+5];
nx=x-LATTICE_VELOCITIES_D[6][0];
ny=y-LATTICE_VELOCITIES_D[6][1];
nz=z-LATTICE_VELOCITIES_D[6][2];
stream_field_d[Q_LBM*idx+6]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+6];
nx=x-LATTICE_VELOCITIES_D[7][0];
ny=y-LATTICE_VELOCITIES_D[7][1];
nz=z-LATTICE_VELOCITIES_D[7][2];
stream_field_d[Q_LBM*idx+7]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+7];
nx=x-LATTICE_VELOCITIES_D[8][0];
ny=y-LATTICE_VELOCITIES_D[8][1];
nz=z-LATTICE_VELOCITIES_D[8][2];
stream_field_d[Q_LBM*idx+8]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+8];
nx=x-LATTICE_VELOCITIES_D[9][0];
ny=y-LATTICE_VELOCITIES_D[9][1];
nz=z-LATTICE_VELOCITIES_D[9][2];
stream_field_d[Q_LBM*idx+9]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+9];
nx=x-LATTICE_VELOCITIES_D[10][0];
ny=y-LATTICE_VELOCITIES_D[10][1];
nz=z-LATTICE_VELOCITIES_D[10][2];
stream_field_d[Q_LBM*idx+10]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+10];
nx=x-LATTICE_VELOCITIES_D[11][0];
ny=y-LATTICE_VELOCITIES_D[11][1];
nz=z-LATTICE_VELOCITIES_D[11][2];
stream_field_d[Q_LBM*idx+11]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+11];
nx=x-LATTICE_VELOCITIES_D[12][0];
ny=y-LATTICE_VELOCITIES_D[12][1];
nz=z-LATTICE_VELOCITIES_D[12][2];
stream_field_d[Q_LBM*idx+12]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+12];
nx=x-LATTICE_VELOCITIES_D[13][0];
ny=y-LATTICE_VELOCITIES_D[13][1];
nz=z-LATTICE_VELOCITIES_D[13][2];
stream_field_d[Q_LBM*idx+13]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+13];
nx=x-LATTICE_VELOCITIES_D[14][0];
ny=y-LATTICE_VELOCITIES_D[14][1];
nz=z-LATTICE_VELOCITIES_D[14][2];
stream_field_d[Q_LBM*idx+14]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+14];
nx=x-LATTICE_VELOCITIES_D[15][0];
ny=y-LATTICE_VELOCITIES_D[15][1];
nz=z-LATTICE_VELOCITIES_D[15][2];
stream_field_d[Q_LBM*idx+15]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+15];
nx=x-LATTICE_VELOCITIES_D[16][0];
ny=y-LATTICE_VELOCITIES_D[16][1];
nz=z-LATTICE_VELOCITIES_D[16][2];
stream_field_d[Q_LBM*idx+16]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+16];
nx=x-LATTICE_VELOCITIES_D[17][0];
ny=y-LATTICE_VELOCITIES_D[17][1];
nz=z-LATTICE_VELOCITIES_D[17][2];
stream_field_d[Q_LBM*idx+17]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+17];
nx=x-LATTICE_VELOCITIES_D[18][0];
ny=y-LATTICE_VELOCITIES_D[18][1];
nz=z-LATTICE_VELOCITIES_D[18][2];
stream_field_d[Q_LBM*idx+18]=collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+18];
}
}
/*
* Performs collision computation.
*/
__global__ void DoCollision(){
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int step=xlength_d+2, idx=x+y*step+z*step*step;
float density, velocity[D_LBM], feq[Q_LBM], *current_cell_s;
/* check that indices are within the bounds since there could be more threads than needed */
if (0<x && x<(step-1) && 0<y && y<(step-1) && 0<z && z<(step-1)){
current_cell_s=&collide_field_d[Q_LBM*idx];
ComputeDensityGpu(current_cell_s,&density);
ComputeVelocityGpu(current_cell_s,&density,velocity);
ComputeFeqGpu(&density,velocity,feq);
ComputePostCollisionDistributionsGpu(current_cell_s,feq);
}
}
/*
* Computes proper boundary values.
*/
__global__ void TreatBoundary(int *flag_field_d){
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int step=xlength_d+2, idx=x+y*step+z*step*step, nx, ny, nz, i, boundary_side=0, boundary_idx=100500;
float density, dot_prod;
if(idx<num_cells_d) {
if(flag_field_d[idx] == BOTTOM_BOUNDARY) {
boundary_side = BOTTOM_BOUNDARY;
boundary_idx = BOTTOM_BOUNDARY_IDX;
} else if (flag_field_d[idx] == LEFT_BOUNDARY) {
boundary_side = LEFT_BOUNDARY;
boundary_idx = LEFT_BOUNDARY_IDX;
} else if (flag_field_d[idx] == RIGHT_BOUNDARY) {
boundary_side = RIGHT_BOUNDARY;
boundary_idx = RIGHT_BOUNDARY_IDX;
} else if (flag_field_d[idx] == BACK_BOUNDARY) {
boundary_side = BACK_BOUNDARY;
boundary_idx = BACK_BOUNDARY_IDX;
} else if (flag_field_d[idx] == FRONT_BOUNDARY) {
boundary_side = FRONT_BOUNDARY;
boundary_idx = FRONT_BOUNDARY_IDX;
} else if (flag_field_d[idx] == LEFT_BOTTOM_EDGE) {
boundary_side = LEFT_BOTTOM_EDGE;
boundary_idx = 13;
} else if (flag_field_d[idx] == RIGHT_BOTTOM_EDGE) {
boundary_side = RIGHT_BOTTOM_EDGE;
boundary_idx = 11;
} else if (flag_field_d[idx] == BACK_BOTTOM_EDGE) {
boundary_side = BACK_BOTTOM_EDGE;
boundary_idx = 18;
} else if (flag_field_d[idx] == FRONT_BOTTOM_EDGE) {
boundary_side = FRONT_BOTTOM_EDGE;
boundary_idx = 4;
} else if (flag_field_d[idx] == LEFT_BACK_EDGE) {
boundary_side = LEFT_BACK_EDGE;
boundary_idx = 17;
} else if (flag_field_d[idx] == LEFT_FRONT_EDGE) {
boundary_side = LEFT_FRONT_EDGE;
boundary_idx = 3;
} else if (flag_field_d[idx] == RIGHT_BACK_EDGE) {
boundary_side = RIGHT_BACK_EDGE;
boundary_idx = 15;
} else if (flag_field_d[idx] == RIGHT_FRONT_EDGE) {
boundary_side = RIGHT_FRONT_EDGE;
boundary_idx = 1;
} else if (flag_field_d[idx] == LEFT_UPPER_EDGE) {
boundary_side = LEFT_UPPER_EDGE;
boundary_idx = 7;
} else if (flag_field_d[idx] == RIGHT_UPPER_EDGE) {
boundary_side = RIGHT_UPPER_EDGE;
boundary_idx = 5;
} else if (flag_field_d[idx] == BACK_UPPER_EDGE) {
boundary_side = BACK_UPPER_EDGE;
boundary_idx = 14;
} else if (flag_field_d[idx] == FRONT_UPPER_EDGE) {
boundary_side = FRONT_UPPER_EDGE;
boundary_idx = 0;
} else if (flag_field_d[idx] == TOP_BOUNDARY) {
boundary_side = TOP_BOUNDARY;
boundary_idx = TOP_BOUNDARY_IDX;
}
if( boundary_side==LEFT_BOUNDARY || boundary_side==RIGHT_BOUNDARY ||
boundary_side==BOTTOM_BOUNDARY ||
boundary_side==BACK_BOUNDARY || boundary_side==FRONT_BOUNDARY) {
i = TREAT_BOUNDARY_INDECES[boundary_idx][0];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][1];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][2];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][3];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
i = TREAT_BOUNDARY_INDECES[boundary_idx][4];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)];
} else if (boundary_side == LEFT_BOTTOM_EDGE || boundary_side == RIGHT_BOTTOM_EDGE ||
boundary_side == BACK_BOTTOM_EDGE || boundary_side == FRONT_BOTTOM_EDGE ||
boundary_side == LEFT_BACK_EDGE || boundary_side == LEFT_FRONT_EDGE ||
boundary_side == RIGHT_BACK_EDGE || boundary_side == RIGHT_FRONT_EDGE) {
nx=x+LATTICE_VELOCITIES_D[boundary_idx][0];
ny=y+LATTICE_VELOCITIES_D[boundary_idx][1];
nz=z+LATTICE_VELOCITIES_D[boundary_idx][2];
collide_field_d[Q_LBM*(x+y*step+z*step*step)+boundary_idx]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(boundary_idx)];
} else if(boundary_side == LEFT_UPPER_EDGE || boundary_side == RIGHT_UPPER_EDGE ||
boundary_side == BACK_UPPER_EDGE || boundary_side == FRONT_UPPER_EDGE) {
i = boundary_idx;
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
/* Compute density in the neighbour cell */
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
/* Compute dot product */
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
/* Assign the boundary cell value */
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
} else if(boundary_side == TOP_BOUNDARY) {
i = TREAT_BOUNDARY_INDECES[boundary_idx][0];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][1];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][2];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][3];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
i = TREAT_BOUNDARY_INDECES[boundary_idx][4];
nx=x+LATTICE_VELOCITIES_D[i][0];
ny=y+LATTICE_VELOCITIES_D[i][1];
nz=z+LATTICE_VELOCITIES_D[i][2];
ComputeDensityGpu(&collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)],&density);
dot_prod=LATTICE_VELOCITIES_D[i][0]*wall_velocity_d[0]+
LATTICE_VELOCITIES_D[i][1]*wall_velocity_d[1]+
LATTICE_VELOCITIES_D[i][2]*wall_velocity_d[2];
collide_field_d[Q_LBM*(idx)+i]=
collide_field_d[Q_LBM*(nx+ny*step+nz*step*step)+InvGpu(i)]+
2*LATTICE_WEIGHTS_D[i]*density*C_S_POW2_INV*dot_prod;
}
}
}
/*
* Performs pointer swap on GPU.
*/
__global__ void DoSwap(){
float *swap=collide_field_d; collide_field_d=stream_field_d; stream_field_d=swap;
}
void DoIteration(float *collide_field, float *stream_field, int *flag_field, float tau,
float *wall_velocity, int xlength, float **collide_field_dd, float **stream_field_dd,
int **flag_field_d, float *mlups_sum){
int num_cells = pow(xlength+2, D_LBM);
clock_t mlups_time;
/* initialize constant data */
cudaErrorCheck(cudaMemcpyToSymbol(xlength_d, &xlength, sizeof(int), 0, cudaMemcpyHostToDevice));
cudaErrorCheck(cudaMemcpyToSymbol(num_cells_d, &num_cells, sizeof(int), 0, cudaMemcpyHostToDevice));
cudaErrorCheck(cudaMemcpyToSymbol(tau_d, &tau, sizeof(float), 0, cudaMemcpyHostToDevice));
cudaErrorCheck(cudaMemcpyToSymbol(wall_velocity_d, wall_velocity, D_LBM*sizeof(float), 0, cudaMemcpyHostToDevice));
cudaErrorCheck(cudaMemcpyToSymbol(collide_field_d, collide_field_dd, sizeof(*collide_field_dd), 0, cudaMemcpyHostToDevice));
cudaErrorCheck(cudaMemcpyToSymbol(stream_field_d, stream_field_dd, sizeof(*stream_field_dd), 0, cudaMemcpyHostToDevice));
/* define grid structure */
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((xlength+2+block.x-1)/block.x, (xlength+2+block.y-1)/block.y, (xlength+2+block.z-1)/block.z);
mlups_time = clock();
/* perform streaming */
DoStreaming<<<grid,block>>>();
cudaErrorCheck(cudaThreadSynchronize());
cudaErrorCheck(cudaPeekAtLastError());
/* Perform the swapping of collide and stream fields */
DoSwap<<<1,1>>>();
cudaErrorCheck(cudaThreadSynchronize());
cudaErrorCheck(cudaPeekAtLastError());
/* perform collision */
DoCollision<<<grid,block>>>();
cudaErrorCheck(cudaThreadSynchronize());
cudaErrorCheck(cudaPeekAtLastError());
/* perform boundary treatment */
TreatBoundary<<<grid,block>>>(*flag_field_d);
cudaErrorCheck(cudaPeekAtLastError());
mlups_time = clock()-mlups_time;
*mlups_sum += num_cells/(MLUPS_EXPONENT*(float)mlups_time/CLOCKS_PER_SEC);
if(VERBOSE)
printf("MLUPS: %f\n", num_cells/(MLUPS_EXPONENT*(float)mlups_time/CLOCKS_PER_SEC));
/* copy data back to host */
cudaErrorCheck(cudaMemcpyFromSymbol(collide_field_dd, collide_field_d, sizeof(*collide_field_dd), 0, cudaMemcpyDeviceToHost));
cudaErrorCheck(cudaMemcpyFromSymbol(stream_field_dd, stream_field_d, sizeof(*stream_field_dd), 0, cudaMemcpyDeviceToHost));
}
|
85e6672b977f20a43743721988db52e166bcbb72.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
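/* Box decoding (interpretation of the arithmetic below, not stated in the
   original source): centre = (cell + 2*sigmoid(t) - 0.5) * stride and
   size = (2*sigmoid(t))^2 * anchor, i.e. the "scaled" YOLO parameterisation. */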
//Location
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2*k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2*k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
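/* Per-image output layout, as written by CalDetection: one leading float holding
   the detection count, followed by up to MAX_OUTPUT_BBOX_COUNT Detection structs.
   Only that leading counter is cleared here before each launch. */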
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i], output, numElem, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
85e6672b977f20a43743721988db52e166bcbb72.cu
|
#include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2*k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2*k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
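// note: this launch goes to the default stream; the 'stream' argument received by enqueue() is not forwarded here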
CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i], output, numElem, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
7916f7a641372679ef0bf7355ef862214fbad4a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ void get_vertex_row_group(int *row_group, bool *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// printf("%d %d\n", vertex_num, total_dl_matrix_row_num);
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
for (int j = 0, cur_index = i * total_dl_matrix_col_num; j < vertex_num;
j++, cur_index++) {
row_group[i] += (int)(dl_matrix[cur_index]) * (j + 1);
}
}
}
__global__ void get_vertex_row_group(int *row_group, int *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// printf("%d %d\n", vertex_num, total_dl_matrix_row_num);
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
for (int j = 0; j < vertex_num; j++) {
row_group[i] += dl_matrix[i * total_dl_matrix_col_num + j] * (j + 1);
}
}
}
|
7916f7a641372679ef0bf7355ef862214fbad4a7.cu
|
#include "includes.h"
__device__ void get_vertex_row_group(int *row_group, bool *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// printf("%d %d\n", vertex_num, total_dl_matrix_row_num);
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
for (int j = 0, cur_index = i * total_dl_matrix_col_num; j < vertex_num;
j++, cur_index++) {
row_group[i] += (int)(dl_matrix[cur_index]) * (j + 1);
}
}
}
__global__ void get_vertex_row_group(int *row_group, int *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// printf("%d %d\n", vertex_num, total_dl_matrix_row_num);
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
for (int j = 0; j < vertex_num; j++) {
row_group[i] += dl_matrix[i * total_dl_matrix_col_num + j] * (j + 1);
}
}
}
|
73c6a86f3071cdb9e746ffc9216119bfb60dcaf2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Find3DMinMax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
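// note: the allocations below request XSIZE*YSIZE bytes rather than XSIZE*YSIZE elements of the
// respective type, so the kernel may touch memory past the end of each buffer; this harness only times launches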
int *d_Result = NULL;
hipMalloc(&d_Result, XSIZE*YSIZE);
float *d_Data1 = NULL;
hipMalloc(&d_Data1, XSIZE*YSIZE);
float *d_Data2 = NULL;
hipMalloc(&d_Data2, XSIZE*YSIZE);
float *d_Data3 = NULL;
hipMalloc(&d_Data3, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Find3DMinMax), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Result,d_Data1,d_Data2,d_Data3,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Find3DMinMax), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Result,d_Data1,d_Data2,d_Data3,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Find3DMinMax), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Result,d_Data1,d_Data2,d_Data3,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
73c6a86f3071cdb9e746ffc9216119bfb60dcaf2.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Find3DMinMax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
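// note: the allocations below request XSIZE*YSIZE bytes rather than XSIZE*YSIZE elements of the
// respective type, so the kernel may touch memory past the end of each buffer; this harness only times launches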
int *d_Result = NULL;
cudaMalloc(&d_Result, XSIZE*YSIZE);
float *d_Data1 = NULL;
cudaMalloc(&d_Data1, XSIZE*YSIZE);
float *d_Data2 = NULL;
cudaMalloc(&d_Data2, XSIZE*YSIZE);
float *d_Data3 = NULL;
cudaMalloc(&d_Data3, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Find3DMinMax<<<gridBlock,threadBlock>>>(d_Result,d_Data1,d_Data2,d_Data3,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Find3DMinMax<<<gridBlock,threadBlock>>>(d_Result,d_Data1,d_Data2,d_Data3,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Find3DMinMax<<<gridBlock,threadBlock>>>(d_Result,d_Data1,d_Data2,d_Data3,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a99f75e5fbf3bd29fc5020a0e12854f9f8794db4.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
// modified by James Atlas to show some performance optimizations
// Potentially useful macro for catching CUDA errors:
#define CUDA_TRY(...) \
do { \
hipError_t err = (__VA_ARGS__); \
if (err != hipSuccess) { \
fprintf(stderr, "[%s:%d] ", __FILE__, __LINE__); \
fprintf(stderr, "%s ", #__VA_ARGS__); /* print the failing call */ \
fprintf(stderr, "(msg: %s)\n", hipGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while(0)
// usage: CUDA_TRY(hipMalloc(....));
// CUDA_TRY(hipMemcpy(....));
// CUDA_TRY(hipDeviceSynchronize());
//
// the source file and line number will be reported in the message
#include <chrono>
#include <random>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <string> // needed for std::stoi
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int main(int argc, char** args) {
// Print the vector length to be used, and compute its size
const int numElements = (argc > 1) ? std::stoi(args[1]) : 50000;
const int threadsPerBlock = (argc > 2) ? std::stoi(args[2]) : 256;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
CUDA_TRY(hipMalloc((void **)&d_A, size));
// Allocate the device input vector B
float *d_B = NULL;
CUDA_TRY(hipMalloc((void **)&d_B, size));
// Allocate the device output vector C
float *d_C = NULL;
CUDA_TRY(hipMalloc((void **)&d_C, size));
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
CUDA_TRY(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
CUDA_TRY(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice));
// Launch the Vector Add CUDA Kernel
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; // round up so a partial final block covers the remainder
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
auto timeStart = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
CUDA_TRY(hipGetLastError());
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
CUDA_TRY(hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost));
auto timeStop = std::chrono::steady_clock::now();
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
auto timeInSeconds = std::chrono::duration<float>(timeStop - timeStart).count();
printf("Total time was %.6f seconds\n", timeInSeconds);
// Free device global memory
CUDA_TRY(hipFree(d_A));
CUDA_TRY(hipFree(d_B));
CUDA_TRY(hipFree(d_C));
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
CUDA_TRY(hipDeviceReset());
printf("Done\n");
return 0;
}
|
a99f75e5fbf3bd29fc5020a0e12854f9f8794db4.cu
|
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
// modified by James Atlas to show some performance optimizations
// Potentially useful macro for catching CUDA errors:
#define CUDA_TRY(...) \
do { \
cudaError_t err = (__VA_ARGS__); \
if (err != cudaSuccess) { \
fprintf(stderr, "[%s:%d] ", __FILE__, __LINE__); \
fprintf(stderr, "%s ", #__VA_ARGS__); /* print the failing call */ \
fprintf(stderr, "(msg: %s)\n", cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while(0)
// usage: CUDA_TRY(cudaMalloc(....));
// CUDA_TRY(cudaMemcpy(....));
// CUDA_TRY(cudaDeviceSynchronize());
//
// the source file and line number will be reported in the message
#include <chrono>
#include <random>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <string> // needed for std::stoi
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int main(int argc, char** args) {
// Print the vector length to be used, and compute its size
const int numElements = (argc > 1) ? std::stoi(args[1]) : 50000;
const int threadsPerBlock = (argc > 2) ? std::stoi(args[2]) : 256;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
CUDA_TRY(cudaMalloc((void **)&d_A, size));
// Allocate the device input vector B
float *d_B = NULL;
CUDA_TRY(cudaMalloc((void **)&d_B, size));
// Allocate the device output vector C
float *d_C = NULL;
CUDA_TRY(cudaMalloc((void **)&d_C, size));
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
CUDA_TRY(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
CUDA_TRY(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
// Launch the Vector Add CUDA Kernel
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; // round up so a partial final block covers the remainder
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
auto timeStart = std::chrono::steady_clock::now();
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
CUDA_TRY(cudaGetLastError());
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
CUDA_TRY(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));
auto timeStop = std::chrono::steady_clock::now();
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
auto timeInSeconds = std::chrono::duration<float>(timeStop - timeStart).count();
printf("Total time was %.6f seconds\n", timeInSeconds);
// Free device global memory
CUDA_TRY(cudaFree(d_A));
CUDA_TRY(cudaFree(d_B));
CUDA_TRY(cudaFree(d_C));
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
CUDA_TRY(cudaDeviceReset());
printf("Done\n");
return 0;
}
|
6e25ee38fcda342963870f1fe1cb67676bff0d59.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gtest/gtest.h"
// TODO: make it less vague(use proper include)
#include "../gpu/kmeans/kmeans_centroids.h"
#include <thrust/host_vector.h>
TEST(KMeansCentroids, CalculateCentroids) {
// GIVEN
int k = 2;
int d = 2;
int n = 4;
// Setup data
thrust::host_vector<float> dataHost(n * d);
dataHost[0] = 0.0f;
dataHost[1] = 0.0f; // [0,0]
dataHost[2] = 0.0f;
dataHost[3] = 1.0f; // [0,1]
dataHost[4] = 1.0f;
dataHost[5] = 1.0f; // [1,1]
dataHost[6] = 1.0f;
dataHost[7] = 0.0f; // [1,0]
thrust::device_vector<float> dataDevice(n * d);
dataDevice = dataHost;
// Setup counts
thrust::device_vector<int> countsDevice(k);
countsDevice[0] = 0;
countsDevice[1] = 0;
// Setup labels
thrust::host_vector<int> labelsHost(n);
labelsHost[0] = 0; // label for [0,0] -> 0
labelsHost[1] = 0; // label for [0,1] -> 0
labelsHost[2] = 1; // label for [1,1] -> 1
labelsHost[3] = 1; // label for [1,0] -> 1
thrust::device_vector<int> labelsDevice(n);
labelsDevice = labelsHost;
// Setup indices
thrust::host_vector<int> indicesHost(n);
indicesHost[0] = 0;
indicesHost[1] = 1;
indicesHost[2] = 2;
indicesHost[3] = 3;
thrust::device_vector<int> indicesDevice(n);
indicesDevice = indicesHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
int n_threads_x = 64;
int n_threads_y = 16;
kmeans::detail::
hipLaunchKernelGGL(( calculate_centroids), dim3(dim3(1, 30)), dim3(dim3(n_threads_x, n_threads_y)), 0, 0,
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
// THEN
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(0.0f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(1.0f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(1.0f, centroidsHost.data()[3]);
SUCCEED();
}
// Calculating centroids "on 2 GPUs" should yield the same result as
// calculating centroids on 1 GPU from all data
TEST(KMeansCentroids, CalculateCentroids2GPU) {
/**
* CALCULATE CENTROIDS IN 2 TURNS, EACH TIME FROM HALF THE DATA
* */
// GIVEN
int k = 2;
int d = 2;
int n = 3;
thrust::host_vector<float> dataHost(n * d);
thrust::device_vector<float> dataDevice(n * d);
thrust::device_vector<int> countsDevice(k);
thrust::host_vector<int> labelsHost(n);
thrust::device_vector<int> labelsDevice(n);
thrust::host_vector<float> centroidsHost(d * k);
thrust::device_vector<float> centroidsDevice(d * k);
thrust::host_vector<float> centroidsHostFirst(d * k);
thrust::host_vector<int> indicesHost(n);
thrust::device_vector<int> indicesDevice(n);
int n_threads_x = 64;
int n_threads_y = 16;
indicesHost[0] = 0;
indicesHost[1] = 1;
indicesHost[2] = 2;
dataHost[0] = 4.0f;
dataHost[1] = 2.0f; // [4,2]
dataHost[2] = 1.0f;
dataHost[3] = 0.0f; // [1,0]
dataHost[4] = 4.0f;
dataHost[5] = 0.0f; // [4,0]
countsDevice[0] = 0;
countsDevice[1] = 0;
labelsHost[0] = 0;
labelsHost[1] = 0;
labelsHost[2] = 0;
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
indicesDevice = indicesHost;
dataDevice = dataHost;
labelsDevice = labelsHost;
centroidsDevice = centroidsHost;
// Run on "gpu1"
kmeans::detail::
hipLaunchKernelGGL(( calculate_centroids), dim3(dim3(1, 30)), dim3(dim3(n_threads_x, n_threads_y)), 0, 0,
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
centroidsHostFirst = centroidsDevice;
// Setup data for "gpu2"
dataHost[0] = 4.0f;
dataHost[1] = 4.0f; // [4,4]
dataHost[2] = 1.0f;
dataHost[3] = 4.0f; // [1,4]
dataHost[4] = 1.0f;
dataHost[5] = 2.0f; // [1,2]
countsDevice[0] = 0;
countsDevice[1] = 0;
labelsHost[0] = 0;
labelsHost[1] = 1;
labelsHost[2] = 1;
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
dataDevice = dataHost;
labelsDevice = labelsHost;
centroidsDevice = centroidsHost;
kmeans::detail::memzero(countsDevice);
kmeans::detail::memzero(centroidsDevice);
// Run on "gpu2"
kmeans::detail::
hipLaunchKernelGGL(( calculate_centroids), dim3(dim3(1, 30)), dim3(dim3(n_threads_x, n_threads_y)), 0, 0,
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
centroidsHost = centroidsDevice;
centroidsHost.data()[0] += centroidsHostFirst.data()[0];
centroidsHost.data()[1] += centroidsHostFirst.data()[1];
centroidsHost.data()[2] += centroidsHostFirst.data()[2];
centroidsHost.data()[3] += centroidsHostFirst.data()[3];
/**
* CALCULATE CENTROIDS IN 1 TURN, FROM ALL DATA
* */
k = 2;
d = 2;
n = 6;
// Setup data
thrust::host_vector<float> dataHost2(n * d);
dataHost2[0] = 4.0f;
dataHost2[1] = 2.0f; // [4,2]
dataHost2[2] = 1.0f;
dataHost2[3] = 0.0f; // [1,0]
dataHost2[4] = 4.0f;
dataHost2[5] = 0.0f; // [4,0]
dataHost2[6] = 4.0f;
dataHost2[7] = 4.0f; // [4,4]
dataHost2[8] = 1.0f;
dataHost2[9] = 4.0f; // [1,4]
dataHost2[10] = 1.0f;
dataHost2[11] = 2.0f; // [1,2]
thrust::device_vector<float> dataDevice2(n * d);
dataDevice2 = dataHost2;
// Setup counts
thrust::device_vector<int> countsDevice2(k);
// Setup labels
thrust::host_vector<int> labelsHost2(n);
labelsHost2[0] = 0;
labelsHost2[1] = 0;
labelsHost2[2] = 0;
labelsHost2[3] = 0;
labelsHost2[4] = 1;
labelsHost2[5] = 1;
thrust::device_vector<int> labelsDevice2(n);
labelsDevice2 = labelsHost2;
// Setup indices
thrust::host_vector<int> indicesHost2(n);
indicesHost2[0] = 0;
indicesHost2[1] = 1;
indicesHost2[2] = 2;
indicesHost2[3] = 3;
indicesHost2[4] = 4;
indicesHost2[5] = 5;
thrust::device_vector<int> indicesDevice2(n);
indicesDevice2 = indicesHost2;
// Setup centroids
thrust::device_vector<float> centroidsDevice2(d * k);
kmeans::detail::memzero(countsDevice2);
kmeans::detail::memzero(centroidsDevice2);
kmeans::detail::
hipLaunchKernelGGL(( calculate_centroids), dim3(dim3(1, 30)), dim3(dim3(n_threads_x, n_threads_y)), 0, 0,
n, d, k, thrust::raw_pointer_cast(dataDevice2.data()),
thrust::raw_pointer_cast(labelsDevice2.data()),
thrust::raw_pointer_cast(indicesDevice2.data()),
thrust::raw_pointer_cast(centroidsDevice2.data()),
thrust::raw_pointer_cast(countsDevice2.data()));
// THEN
thrust::host_vector<float> centroidsHost2(d * k);
centroidsHost2 = centroidsDevice2;
ASSERT_FLOAT_EQ(centroidsHost2.data()[0], centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[1], centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[2], centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[3], centroidsHost.data()[3]);
SUCCEED();
}
TEST(KMeansCentroids, RevertCentroidZeroing) {
// GIVEN
int k = 3;
int d = 2;
// Setup counts
thrust::host_vector<int> countsHost(k);
countsHost[0] = 1;
countsHost[1] = 0;
countsHost[2] = 1;
thrust::device_vector<int> countsDevice(k);
countsDevice = countsHost;
// Setup tmp centroids (original)
thrust::host_vector<float> tmp_centroidsHost(d * k);
tmp_centroidsHost[0] = 1.0f;
tmp_centroidsHost[1] = 1.0f;
tmp_centroidsHost[2] = 2.0f;
tmp_centroidsHost[3] = 2.0f;
tmp_centroidsHost[4] = 3.0f;
tmp_centroidsHost[5] = 3.0f;
thrust::device_vector<float> tmp_centroidsDevice(d * k);
tmp_centroidsDevice = tmp_centroidsHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 5.0f;
centroidsHost[1] = 5.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
centroidsHost[4] = 4.0f;
centroidsHost[5] = 4.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
// WHEN
hipLaunchKernelGGL(( kmeans::detail::revert_zeroed_centroids),
dim3(dim3((d - 1) / 32 + 1, (k - 1) / 32 + 1)), dim3(dim3(32, 32)), 0, 0,
d, k, thrust::raw_pointer_cast(tmp_centroidsDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
// THEN
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(5.0f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(5.0f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[3]);
ASSERT_FLOAT_EQ(4.0f, centroidsHost.data()[4]);
ASSERT_FLOAT_EQ(4.0f, centroidsHost.data()[5]);
SUCCEED();
}
TEST(KMeansCentroids, CentroidsScaling) {
// GIVEN
int k = 2;
int d = 2;
// Setup counts
thrust::host_vector<int> countsHost(k);
countsHost[0] = 4;
countsHost[1] = 2;
thrust::device_vector<int> countsDevice(k);
countsDevice = countsHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 1.0f;
centroidsHost[1] = 2.0f;
centroidsHost[2] = 3.0f;
centroidsHost[3] = 4.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
// WHEN
hipLaunchKernelGGL(( kmeans::detail::scale_centroids), dim3(dim3((d - 1) / 32 + 1, (k - 1) / 32 + 1)),
dim3(dim3(32, 32)), 0, 0,
d, k, thrust::raw_pointer_cast(countsDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()));
// THEN
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(0.25f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(0.5f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(1.5f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[3]);
SUCCEED();
}
|
6e25ee38fcda342963870f1fe1cb67676bff0d59.cu
|
#include "gtest/gtest.h"
// TODO: make it less vague(use proper include)
#include "../gpu/kmeans/kmeans_centroids.h"
#include <thrust/host_vector.h>
TEST(KMeansCentroids, CalculateCentroids) {
// GIVEN
int k = 2;
int d = 2;
int n = 4;
// Setup data
thrust::host_vector<float> dataHost(n * d);
dataHost[0] = 0.0f;
dataHost[1] = 0.0f; // [0,0]
dataHost[2] = 0.0f;
dataHost[3] = 1.0f; // [0,1]
dataHost[4] = 1.0f;
dataHost[5] = 1.0f; // [1,1]
dataHost[6] = 1.0f;
dataHost[7] = 0.0f; // [1,0]
thrust::device_vector<float> dataDevice(n * d);
dataDevice = dataHost;
// Setup counts
thrust::device_vector<int> countsDevice(k);
countsDevice[0] = 0;
countsDevice[1] = 0;
// Setup labels
thrust::host_vector<int> labelsHost(n);
labelsHost[0] = 0; // label for [0,0] -> 0
labelsHost[1] = 0; // label for [0,1] -> 0
labelsHost[2] = 1; // label for [1,1] -> 1
labelsHost[3] = 1; // label for [1,0] -> 1
thrust::device_vector<int> labelsDevice(n);
labelsDevice = labelsHost;
// Setup indices
thrust::host_vector<int> indicesHost(n);
indicesHost[0] = 0;
indicesHost[1] = 1;
indicesHost[2] = 2;
indicesHost[3] = 3;
thrust::device_vector<int> indicesDevice(n);
indicesDevice = indicesHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
int n_threads_x = 64;
int n_threads_y = 16;
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
// THEN
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(0.0f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(1.0f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(1.0f, centroidsHost.data()[3]);
SUCCEED();
}
// Calculating centroids "on 2 GPUs" should yield the same result as
// calculating centroids on 1 GPU from all data
TEST(KMeansCentroids, CalculateCentroids2GPU) {
/**
* CALCULATE CENTROIDS IN 2 TURNS, EACH TIME FROM HALF THE DATA
* */
// GIVEN
int k = 2;
int d = 2;
int n = 3;
thrust::host_vector<float> dataHost(n * d);
thrust::device_vector<float> dataDevice(n * d);
thrust::device_vector<int> countsDevice(k);
thrust::host_vector<int> labelsHost(n);
thrust::device_vector<int> labelsDevice(n);
thrust::host_vector<float> centroidsHost(d * k);
thrust::device_vector<float> centroidsDevice(d * k);
thrust::host_vector<float> centroidsHostFirst(d * k);
thrust::host_vector<int> indicesHost(n);
thrust::device_vector<int> indicesDevice(n);
int n_threads_x = 64;
int n_threads_y = 16;
indicesHost[0] = 0;
indicesHost[1] = 1;
indicesHost[2] = 2;
dataHost[0] = 4.0f;
dataHost[1] = 2.0f; // [4,2]
dataHost[2] = 1.0f;
dataHost[3] = 0.0f; // [1,0]
dataHost[4] = 4.0f;
dataHost[5] = 0.0f; // [4,0]
countsDevice[0] = 0;
countsDevice[1] = 0;
labelsHost[0] = 0;
labelsHost[1] = 0;
labelsHost[2] = 0;
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
indicesDevice = indicesHost;
dataDevice = dataHost;
labelsDevice = labelsHost;
centroidsDevice = centroidsHost;
// Run on "gpu1"
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
centroidsHostFirst = centroidsDevice;
// Setup data for "gpu2"
dataHost[0] = 4.0f;
dataHost[1] = 4.0f; // [4,4]
dataHost[2] = 1.0f;
dataHost[3] = 4.0f; // [1,4]
dataHost[4] = 1.0f;
dataHost[5] = 2.0f; // [1,2]
countsDevice[0] = 0;
countsDevice[1] = 0;
labelsHost[0] = 0;
labelsHost[1] = 1;
labelsHost[2] = 1;
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
dataDevice = dataHost;
labelsDevice = labelsHost;
centroidsDevice = centroidsHost;
kmeans::detail::memzero(countsDevice);
kmeans::detail::memzero(centroidsDevice);
// Run on "gpu2"
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
centroidsHost = centroidsDevice;
centroidsHost.data()[0] += centroidsHostFirst.data()[0];
centroidsHost.data()[1] += centroidsHostFirst.data()[1];
centroidsHost.data()[2] += centroidsHostFirst.data()[2];
centroidsHost.data()[3] += centroidsHostFirst.data()[3];
/**
* CALCULATE CENTROIDS IN 1 TURN, FROM ALL DATA
* */
k = 2;
d = 2;
n = 6;
// Setup data
thrust::host_vector<float> dataHost2(n * d);
dataHost2[0] = 4.0f;
dataHost2[1] = 2.0f; // [4,2]
dataHost2[2] = 1.0f;
dataHost2[3] = 0.0f; // [1,0]
dataHost2[4] = 4.0f;
dataHost2[5] = 0.0f; // [4,0]
dataHost2[6] = 4.0f;
dataHost2[7] = 4.0f; // [4,4]
dataHost2[8] = 1.0f;
dataHost2[9] = 4.0f; // [1,4]
dataHost2[10] = 1.0f;
dataHost2[11] = 2.0f; // [1,2]
thrust::device_vector<float> dataDevice2(n * d);
dataDevice2 = dataHost2;
// Setup counts
thrust::device_vector<int> countsDevice2(k);
// Setup labels
thrust::host_vector<int> labelsHost2(n);
labelsHost2[0] = 0;
labelsHost2[1] = 0;
labelsHost2[2] = 0;
labelsHost2[3] = 0;
labelsHost2[4] = 1;
labelsHost2[5] = 1;
thrust::device_vector<int> labelsDevice2(n);
labelsDevice2 = labelsHost2;
// Setup indices
thrust::host_vector<int> indicesHost2(n);
indicesHost2[0] = 0;
indicesHost2[1] = 1;
indicesHost2[2] = 2;
indicesHost2[3] = 3;
indicesHost2[4] = 4;
indicesHost2[5] = 5;
thrust::device_vector<int> indicesDevice2(n);
indicesDevice2 = indicesHost2;
// Setup centroids
thrust::device_vector<float> centroidsDevice2(d * k);
kmeans::detail::memzero(countsDevice2);
kmeans::detail::memzero(centroidsDevice2);
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice2.data()),
thrust::raw_pointer_cast(labelsDevice2.data()),
thrust::raw_pointer_cast(indicesDevice2.data()),
thrust::raw_pointer_cast(centroidsDevice2.data()),
thrust::raw_pointer_cast(countsDevice2.data()));
// THEN
thrust::host_vector<float> centroidsHost2(d * k);
centroidsHost2 = centroidsDevice2;
ASSERT_FLOAT_EQ(centroidsHost2.data()[0], centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[1], centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[2], centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[3], centroidsHost.data()[3]);
SUCCEED();
}
TEST(KMeansCentroids, RevertCentroidZeroing) {
// GIVEN
int k = 3;
int d = 2;
// Setup counts
thrust::host_vector<int> countsHost(k);
countsHost[0] = 1;
countsHost[1] = 0;
countsHost[2] = 1;
thrust::device_vector<int> countsDevice(k);
countsDevice = countsHost;
// Setup tmp centroids (original)
thrust::host_vector<float> tmp_centroidsHost(d * k);
tmp_centroidsHost[0] = 1.0f;
tmp_centroidsHost[1] = 1.0f;
tmp_centroidsHost[2] = 2.0f;
tmp_centroidsHost[3] = 2.0f;
tmp_centroidsHost[4] = 3.0f;
tmp_centroidsHost[5] = 3.0f;
thrust::device_vector<float> tmp_centroidsDevice(d * k);
tmp_centroidsDevice = tmp_centroidsHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 5.0f;
centroidsHost[1] = 5.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
centroidsHost[4] = 4.0f;
centroidsHost[5] = 4.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
// WHEN
kmeans::detail::revert_zeroed_centroids<<<
dim3((d - 1) / 32 + 1, (k - 1) / 32 + 1), dim3(32, 32), 0>>>(
d, k, thrust::raw_pointer_cast(tmp_centroidsDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
// THEN
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(5.0f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(5.0f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[3]);
ASSERT_FLOAT_EQ(4.0f, centroidsHost.data()[4]);
ASSERT_FLOAT_EQ(4.0f, centroidsHost.data()[5]);
SUCCEED();
}
TEST(KMeansCentroids, CentroidsScaling) {
// GIVEN
int k = 2;
int d = 2;
// Setup counts
thrust::host_vector<int> countsHost(k);
countsHost[0] = 4;
countsHost[1] = 2;
thrust::device_vector<int> countsDevice(k);
countsDevice = countsHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 1.0f;
centroidsHost[1] = 2.0f;
centroidsHost[2] = 3.0f;
centroidsHost[3] = 4.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
// WHEN
kmeans::detail::scale_centroids<<<dim3((d - 1) / 32 + 1, (k - 1) / 32 + 1),
dim3(32, 32), 0>>>(
d, k, thrust::raw_pointer_cast(countsDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()));
// THEN
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(0.25f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(0.5f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(1.5f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[3]);
SUCCEED();
}
|
a84323ec49baf3c4c6dce068e180bceaac53f934.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
|
a84323ec49baf3c4c6dce068e180bceaac53f934.cu
|
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
4752757c7974e7bc398f44d1e5ef733560c8312b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void initArray(uint32_t * path, double *approx, uint32_t *top_k, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < n){
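// note: sizeof(path) is the size of the pointer (typically 8 bytes), not the number of elements,
// and every thread that passes the index check updates the same first elements concurrently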
for(int i = 0; i < sizeof(path); i++){
approx[i]++;
top_k[i] = path[i]++;
}
}
}
|
4752757c7974e7bc398f44d1e5ef733560c8312b.cu
|
#include "includes.h"
__global__ void initArray(uint32_t * path, double *approx, uint32_t *top_k, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < n){
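// note: sizeof(path) is the size of the pointer (typically 8 bytes), not the number of elements,
// and every thread that passes the index check updates the same first elements concurrently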
for(int i = 0; i < sizeof(path); i++){
approx[i]++;
top_k[i] = path[i]++;
}
}
}
|
699fac47b11df369ba1fcbcc255f46c2511df156.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
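// note: the kernel below writes through a reference to a host stack variable, which is not a valid
// device access, and the result is read back without synchronization; the snippet appears intended
// only to exercise the kernel-launch syntax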
static __global__ void example_cuda_kernel(int& r, int x, int y)
{
r = x * y + ( x * 4 - (y/2) );
}
int static_launch_kernelB(int x, int y)
{
int r;
hipLaunchKernelGGL(( example_cuda_kernel), dim3(1), dim3(1), 0, 0, r, x, y);
return r;
}
|
699fac47b11df369ba1fcbcc255f46c2511df156.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
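// note: the kernel below writes through a reference to a host stack variable, which is not a valid
// device access, and the result is read back without synchronization; the snippet appears intended
// only to exercise the kernel-launch syntax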
static __global__ void example_cuda_kernel(int& r, int x, int y)
{
r = x * y + ( x * 4 - (y/2) );
}
int static_launch_kernelB(int x, int y)
{
int r;
example_cuda_kernel<<<1, 1>>>(r, x, y);
return r;
}
|
4527debec7aad6bc6501066c330063796b5c5793.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpuerrchk.cuh"
#include "assert.h"
#include "real.h"
#include "ch8_in_scan.cuh"
#include <iostream>
#define DAT_SIZE 8
void test(){
real X[DAT_SIZE];
real Y[DAT_SIZE];
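// note: X is copied to the device uninitialized and Y is never checked, so this only smoke-tests the scan launch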
real* d_X;
real* d_Y;
gpuErrchk(hipMalloc((void**) &d_X,sizeof(real)*DAT_SIZE));
gpuErrchk(hipMemcpy(d_X,X,sizeof(real)*DAT_SIZE,hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**) &d_Y,sizeof(real)*DAT_SIZE ));
ch8_in_scan(d_X,d_Y,8);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk(hipMemcpy(Y,d_Y,sizeof(real)*DAT_SIZE,hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_X));
gpuErrchk(hipFree(d_Y));
}
int main(){
test();
}
|
4527debec7aad6bc6501066c330063796b5c5793.cu
|
#include "gpuerrchk.cuh"
#include "assert.h"
#include "real.h"
#include "ch8_in_scan.cuh"
#include <iostream>
#define DAT_SIZE 8
void test(){
real X[DAT_SIZE];
real Y[DAT_SIZE];
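// note: X is copied to the device uninitialized and Y is never checked, so this only smoke-tests the scan launch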
real* d_X;
real* d_Y;
gpuErrchk(cudaMalloc((void**) &d_X,sizeof(real)*DAT_SIZE));
gpuErrchk(cudaMemcpy(d_X,X,sizeof(real)*DAT_SIZE,cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**) &d_Y,sizeof(real)*DAT_SIZE ));
ch8_in_scan(d_X,d_Y,8);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk(cudaMemcpy(Y,d_Y,sizeof(real)*DAT_SIZE,cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_X));
gpuErrchk(cudaFree(d_Y));
}
int main(){
test();
}
|
0cc3d201e86e00fbadc7450782911de75a024bdb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
#include "Image.h"
#include "Template.h"
#include "CombineImage.h"
#define ROUND_NUM 1
#define INIMG_CNT 3
char *infilename[] = { "okano01.bmp", "okano02.bmp", "hist_in.bmp" };
int main()
{
CombineImage ci;
hipEvent_t start, stop;
float elapsedTime = 0.0;
Image *inimg[INIMG_CNT];
for (int i = 0; i < INIMG_CNT; i++) {
ImageBasicOp::newImage(&inimg[i]);
ImageBasicOp::readFromFile(infilename[i], inimg[i]);
}
Image *outimg;
ImageBasicOp::newImage(&outimg);
//ImageBasicOp::makeAtCurrentDevice(outimg, 648, 482);
cout << "AA" << endl;
for (int i = 0; i <= ROUND_NUM; i++) {
hipEventCreate(&start);
hipEventCreate(&stop);
//cout << "Test start!" << endl;
hipEventRecord(start, 0);
ci.combineImageMax(inimg, INIMG_CNT, outimg);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//cout << hipGetErrorString(hipGetLastError()) << endl;
hipEventElapsedTime(&elapsedTime, start, stop);
cout << elapsedTime << endl;
hipEventDestroy(start);
hipEventDestroy(stop);
}
ImageBasicOp::copyToHost(outimg);
ImageBasicOp::writeToFile("out.bmp", outimg);
for (int i = 0; i < INIMG_CNT; i++)
ImageBasicOp::deleteImage(inimg[i]);
ImageBasicOp::deleteImage(outimg);
return 0;
}
|
0cc3d201e86e00fbadc7450782911de75a024bdb.cu
|
#include <iostream>
using namespace std;
#include "Image.h"
#include "Template.h"
#include "CombineImage.h"
#define ROUND_NUM 1
#define INIMG_CNT 3
char *infilename[] = { "okano01.bmp", "okano02.bmp", "hist_in.bmp" };
int main()
{
CombineImage ci;
cudaEvent_t start, stop;
float elapsedTime = 0.0;
Image *inimg[INIMG_CNT];
for (int i = 0; i < INIMG_CNT; i++) {
ImageBasicOp::newImage(&inimg[i]);
ImageBasicOp::readFromFile(infilename[i], inimg[i]);
}
Image *outimg;
ImageBasicOp::newImage(&outimg);
//ImageBasicOp::makeAtCurrentDevice(outimg, 648, 482);
cout << "AA" << endl;
for (int i = 0; i <= ROUND_NUM; i++) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
//cout << "Test start!" << endl;
cudaEventRecord(start, 0);
ci.combineImageMax(inimg, INIMG_CNT, outimg);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//cout << cudaGetErrorString(cudaGetLastError()) << endl;
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << elapsedTime << endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
ImageBasicOp::copyToHost(outimg);
ImageBasicOp::writeToFile("out.bmp", outimg);
for (int i = 0; i < INIMG_CNT; i++)
ImageBasicOp::deleteImage(inimg[i]);
ImageBasicOp::deleteImage(outimg);
return 0;
}
|
f768e51f9df28d61c6f5b1e3d19d9c5c78cf7fbc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#define CUDA_DISABLER
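// note: defining CUDA_DISABLER here makes the '#if !defined CUDA_DISABLER' below evaluate to false,
// so the remainder of this file up to the matching #endif is compiled out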
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/functional.hpp"
namespace cv { namespace gpu { namespace device
{
namespace hough
{
__device__ static int g_counter;
template <typename T, int PIXELS_PER_THREAD>
__global__ void buildEdgePointList(const PtrStepSzb edges, const PtrStep<T> dx, const PtrStep<T> dy, unsigned int* coordList, float* thetaList)
{
__shared__ unsigned int s_coordLists[4][32 * PIXELS_PER_THREAD];
__shared__ float s_thetaLists[4][32 * PIXELS_PER_THREAD];
__shared__ int s_sizes[4];
__shared__ int s_globStart[4];
const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (threadIdx.x == 0)
s_sizes[threadIdx.y] = 0;
__syncthreads();
if (y < edges.rows)
{
// fill the queue
const uchar* edgesRow = edges.ptr(y);
const T* dxRow = dx.ptr(y);
const T* dyRow = dy.ptr(y);
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < edges.cols; ++i, xx += blockDim.x)
{
const T dxVal = dxRow[xx];
const T dyVal = dyRow[xx];
if (edgesRow[xx] && (dxVal != 0 || dyVal != 0))
{
const unsigned int coord = (y << 16) | xx;
float theta = ::atan2f(dyVal, dxVal);
if (theta < 0)
theta += 2.0f * CV_PI_F;
const int qidx = Emulation::smem::atomicAdd(&s_sizes[threadIdx.y], 1);
s_coordLists[threadIdx.y][qidx] = coord;
s_thetaLists[threadIdx.y][qidx] = theta;
}
}
}
__syncthreads();
// let one thread reserve the space required in the global list
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// find how many items are stored in each list
int totalSize = 0;
for (int i = 0; i < blockDim.y; ++i)
{
s_globStart[i] = totalSize;
totalSize += s_sizes[i];
}
// calculate the offset in the global list
const int globalOffset = atomicAdd(&g_counter, totalSize);
for (int i = 0; i < blockDim.y; ++i)
s_globStart[i] += globalOffset;
}
__syncthreads();
// copy local queues to global queue
const int qsize = s_sizes[threadIdx.y];
int gidx = s_globStart[threadIdx.y] + threadIdx.x;
for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x)
{
coordList[gidx] = s_coordLists[threadIdx.y][i];
thetaList[gidx] = s_thetaLists[threadIdx.y][i];
}
}
template <typename T>
int buildEdgePointList_gpu(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList)
{
const int PIXELS_PER_THREAD = 8;
void* counterPtr;
cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 4);
const dim3 grid(divUp(edges.cols, block.x * PIXELS_PER_THREAD), divUp(edges.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(buildEdgePointList<T, PIXELS_PER_THREAD>, hipFuncCachePreferShared) );
hipLaunchKernelGGL(( buildEdgePointList<T, PIXELS_PER_THREAD>), dim3(grid), dim3(block), 0, 0, edges, (PtrStepSz<T>) dx, (PtrStepSz<T>) dy, coordList, thetaList);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int totalCount;
cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );
return totalCount;
}
template int buildEdgePointList_gpu<short>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList);
template int buildEdgePointList_gpu<int>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList);
template int buildEdgePointList_gpu<float>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList);
__global__ void buildRTable(const unsigned int* coordList, const float* thetaList, const int pointsCount,
PtrStep<short2> r_table, int* r_sizes, int maxSize,
const short2 templCenter, const float thetaScale)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= pointsCount)
return;
const unsigned int coord = coordList[tid];
short2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float theta = thetaList[tid];
const int n = __float2int_rn(theta * thetaScale);
const int ind = ::atomicAdd(r_sizes + n, 1);
if (ind < maxSize)
r_table(n, ind) = saturate_cast<short2>(p - templCenter);
}
void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, int* r_sizes,
short2 templCenter, int levels)
{
const dim3 block(256);
const dim3 grid(divUp(pointsCount, block.x));
const float thetaScale = levels / (2.0f * CV_PI_F);
hipLaunchKernelGGL(( buildRTable), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, r_table, r_sizes, r_table.cols, templCenter, thetaScale);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// GHT_Ballard_Pos
__global__ void GHT_Ballard_Pos_calcHist(const unsigned int* coordList, const float* thetaList, const int pointsCount,
const PtrStep<short2> r_table, const int* r_sizes,
PtrStepSzi hist,
const float idp, const float thetaScale)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= pointsCount)
return;
const unsigned int coord = coordList[tid];
short2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float theta = thetaList[tid];
const int n = __float2int_rn(theta * thetaScale);
const short2* r_row = r_table.ptr(n);
const int r_row_size = r_sizes[n];
for (int j = 0; j < r_row_size; ++j)
{
int2 c = p - r_row[j];
c.x = __float2int_rn(c.x * idp);
c.y = __float2int_rn(c.y * idp);
if (c.x >= 0 && c.x < hist.cols - 2 && c.y >= 0 && c.y < hist.rows - 2)
::atomicAdd(hist.ptr(c.y + 1) + c.x + 1, 1);
}
}
void GHT_Ballard_Pos_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, const int* r_sizes,
PtrStepSzi hist,
float dp, int levels)
{
const dim3 block(256);
const dim3 grid(divUp(pointsCount, block.x));
const float idp = 1.0f / dp;
const float thetaScale = levels / (2.0f * CV_PI_F);
hipLaunchKernelGGL(( GHT_Ballard_Pos_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, r_table, r_sizes, hist, idp, thetaScale);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void GHT_Ballard_Pos_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= hist.cols - 2 || y >= hist.rows - 2)
return;
const int curVotes = hist(y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(y + 1, x) &&
curVotes >= hist(y + 1, x + 2) &&
curVotes > hist(y, x + 1) &&
curVotes >= hist(y + 2, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, 1.0f, 0.0f);
votes[ind] = make_int3(curVotes, 0, 0);
}
}
}
int GHT_Ballard_Pos_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int maxSize, float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y));
cudaSafeCall( hipFuncSetCacheConfig(GHT_Ballard_Pos_findPosInHist, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( GHT_Ballard_Pos_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, out, votes, maxSize, dp, threshold);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int totalCount;
cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// GHT_Ballard_PosScale
__global__ void GHT_Ballard_PosScale_calcHist(const unsigned int* coordList, const float* thetaList,
PtrStep<short2> r_table, const int* r_sizes,
PtrStepi hist, const int rows, const int cols,
const float minScale, const float scaleStep, const int scaleRange,
const float idp, const float thetaScale)
{
const unsigned int coord = coordList[blockIdx.x];
float2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float theta = thetaList[blockIdx.x];
const int n = __float2int_rn(theta * thetaScale);
const short2* r_row = r_table.ptr(n);
const int r_row_size = r_sizes[n];
for (int j = 0; j < r_row_size; ++j)
{
const float2 d = saturate_cast<float2>(r_row[j]);
for (int s = threadIdx.x; s < scaleRange; s += blockDim.x)
{
const float scale = minScale + s * scaleStep;
float2 c = p - scale * d;
c.x *= idp;
c.y *= idp;
if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows)
::atomicAdd(hist.ptr((s + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1);
}
}
}
void GHT_Ballard_PosScale_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, const int* r_sizes,
PtrStepi hist, int rows, int cols,
float minScale, float scaleStep, int scaleRange,
float dp, int levels)
{
const dim3 block(256);
const dim3 grid(pointsCount);
const float idp = 1.0f / dp;
const float thetaScale = levels / (2.0f * CV_PI_F);
hipLaunchKernelGGL(( GHT_Ballard_PosScale_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList,
r_table, r_sizes,
hist, rows, cols,
minScale, scaleStep, scaleRange,
idp, thetaScale);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void GHT_Ballard_PosScale_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int scaleRange,
float4* out, int3* votes, const int maxSize,
const float minScale, const float scaleStep, const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
for (int s = 0; s < scaleRange; ++s)
{
const float scale = minScale + s * scaleStep;
const int prevScaleIdx = (s) * (rows + 2);
const int curScaleIdx = (s + 1) * (rows + 2);
const int nextScaleIdx = (s + 2) * (rows + 2);
const int curVotes = hist(curScaleIdx + y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(curScaleIdx + y + 1, x) &&
curVotes >= hist(curScaleIdx + y + 1, x + 2) &&
curVotes > hist(curScaleIdx + y, x + 1) &&
curVotes >= hist(curScaleIdx + y + 2, x + 1) &&
curVotes > hist(prevScaleIdx + y + 1, x + 1) &&
curVotes >= hist(nextScaleIdx + y + 1, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, scale, 0.0f);
votes[ind] = make_int3(curVotes, curVotes, 0);
}
}
}
}
int GHT_Ballard_PosScale_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int scaleRange, float4* out, int3* votes, int maxSize,
float minScale, float scaleStep, float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(GHT_Ballard_PosScale_findPosInHist, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( GHT_Ballard_PosScale_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, rows, cols, scaleRange, out, votes, maxSize, minScale, scaleStep, dp, threshold);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int totalCount;
cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// GHT_Ballard_PosRotation
__global__ void GHT_Ballard_PosRotation_calcHist(const unsigned int* coordList, const float* thetaList,
PtrStep<short2> r_table, const int* r_sizes,
PtrStepi hist, const int rows, const int cols,
const float minAngle, const float angleStep, const int angleRange,
const float idp, const float thetaScale)
{
const unsigned int coord = coordList[blockIdx.x];
float2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float thetaVal = thetaList[blockIdx.x];
for (int a = threadIdx.x; a < angleRange; a += blockDim.x)
{
const float angle = (minAngle + a * angleStep) * (CV_PI_F / 180.0f);
float sinA, cosA;
sincosf(angle, &sinA, &cosA);
float theta = thetaVal - angle;
if (theta < 0)
theta += 2.0f * CV_PI_F;
const int n = __float2int_rn(theta * thetaScale);
const short2* r_row = r_table.ptr(n);
const int r_row_size = r_sizes[n];
for (int j = 0; j < r_row_size; ++j)
{
const float2 d = saturate_cast<float2>(r_row[j]);
const float2 dr = make_float2(d.x * cosA - d.y * sinA, d.x * sinA + d.y * cosA);
float2 c = make_float2(p.x - dr.x, p.y - dr.y);
c.x *= idp;
c.y *= idp;
if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows)
::atomicAdd(hist.ptr((a + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1);
}
}
}
void GHT_Ballard_PosRotation_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, const int* r_sizes,
PtrStepi hist, int rows, int cols,
float minAngle, float angleStep, int angleRange,
float dp, int levels)
{
const dim3 block(256);
const dim3 grid(pointsCount);
const float idp = 1.0f / dp;
const float thetaScale = levels / (2.0f * CV_PI_F);
hipLaunchKernelGGL(( GHT_Ballard_PosRotation_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList,
r_table, r_sizes,
hist, rows, cols,
minAngle, angleStep, angleRange,
idp, thetaScale);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void GHT_Ballard_PosRotation_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int angleRange,
float4* out, int3* votes, const int maxSize,
const float minAngle, const float angleStep, const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
for (int a = 0; a < angleRange; ++a)
{
const float angle = minAngle + a * angleStep;
const int prevAngleIdx = (a) * (rows + 2);
const int curAngleIdx = (a + 1) * (rows + 2);
const int nextAngleIdx = (a + 2) * (rows + 2);
const int curVotes = hist(curAngleIdx + y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(curAngleIdx + y + 1, x) &&
curVotes >= hist(curAngleIdx + y + 1, x + 2) &&
curVotes > hist(curAngleIdx + y, x + 1) &&
curVotes >= hist(curAngleIdx + y + 2, x + 1) &&
curVotes > hist(prevAngleIdx + y + 1, x + 1) &&
curVotes >= hist(nextAngleIdx + y + 1, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, 1.0f, angle);
votes[ind] = make_int3(curVotes, 0, curVotes);
}
}
}
}
int GHT_Ballard_PosRotation_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int angleRange, float4* out, int3* votes, int maxSize,
float minAngle, float angleStep, float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(GHT_Ballard_PosRotation_findPosInHist, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( GHT_Ballard_PosRotation_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, rows, cols, angleRange, out, votes, maxSize, minAngle, angleStep, dp, threshold);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int totalCount;
cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// GHT_Guil_Full
struct FeatureTable
{
uchar* p1_pos_data;
size_t p1_pos_step;
uchar* p1_theta_data;
size_t p1_theta_step;
uchar* p2_pos_data;
size_t p2_pos_step;
uchar* d12_data;
size_t d12_step;
uchar* r1_data;
size_t r1_step;
uchar* r2_data;
size_t r2_step;
};
__constant__ FeatureTable c_templFeatures;
__constant__ FeatureTable c_imageFeatures;
void GHT_Guil_Full_setTemplFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2)
{
FeatureTable tbl;
tbl.p1_pos_data = p1_pos.data;
tbl.p1_pos_step = p1_pos.step;
tbl.p1_theta_data = p1_theta.data;
tbl.p1_theta_step = p1_theta.step;
tbl.p2_pos_data = p2_pos.data;
tbl.p2_pos_step = p2_pos.step;
tbl.d12_data = d12.data;
tbl.d12_step = d12.step;
tbl.r1_data = r1.data;
tbl.r1_step = r1.step;
tbl.r2_data = r2.data;
tbl.r2_step = r2.step;
cudaSafeCall( hipMemcpyToSymbol(c_templFeatures, &tbl, sizeof(FeatureTable)) );
}
void GHT_Guil_Full_setImageFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2)
{
FeatureTable tbl;
tbl.p1_pos_data = p1_pos.data;
tbl.p1_pos_step = p1_pos.step;
tbl.p1_theta_data = p1_theta.data;
tbl.p1_theta_step = p1_theta.step;
tbl.p2_pos_data = p2_pos.data;
tbl.p2_pos_step = p2_pos.step;
tbl.d12_data = d12.data;
tbl.d12_step = d12.step;
tbl.r1_data = r1.data;
tbl.r1_step = r1.step;
tbl.r2_data = r2.data;
tbl.r2_step = r2.step;
cudaSafeCall( hipMemcpyToSymbol(c_imageFeatures, &tbl, sizeof(FeatureTable)) );
}
struct TemplFeatureTable
{
static __device__ float2* p1_pos(int n)
{
return (float2*)(c_templFeatures.p1_pos_data + n * c_templFeatures.p1_pos_step);
}
static __device__ float* p1_theta(int n)
{
return (float*)(c_templFeatures.p1_theta_data + n * c_templFeatures.p1_theta_step);
}
static __device__ float2* p2_pos(int n)
{
return (float2*)(c_templFeatures.p2_pos_data + n * c_templFeatures.p2_pos_step);
}
static __device__ float* d12(int n)
{
return (float*)(c_templFeatures.d12_data + n * c_templFeatures.d12_step);
}
static __device__ float2* r1(int n)
{
return (float2*)(c_templFeatures.r1_data + n * c_templFeatures.r1_step);
}
static __device__ float2* r2(int n)
{
return (float2*)(c_templFeatures.r2_data + n * c_templFeatures.r2_step);
}
};
struct ImageFeatureTable
{
static __device__ float2* p1_pos(int n)
{
return (float2*)(c_imageFeatures.p1_pos_data + n * c_imageFeatures.p1_pos_step);
}
static __device__ float* p1_theta(int n)
{
return (float*)(c_imageFeatures.p1_theta_data + n * c_imageFeatures.p1_theta_step);
}
static __device__ float2* p2_pos(int n)
{
return (float2*)(c_imageFeatures.p2_pos_data + n * c_imageFeatures.p2_pos_step);
}
static __device__ float* d12(int n)
{
return (float*)(c_imageFeatures.d12_data + n * c_imageFeatures.d12_step);
}
static __device__ float2* r1(int n)
{
return (float2*)(c_imageFeatures.r1_data + n * c_imageFeatures.r1_step);
}
static __device__ float2* r2(int n)
{
return (float2*)(c_imageFeatures.r2_data + n * c_imageFeatures.r2_step);
}
};
__device__ float clampAngle(float a)
{
float res = a;
while (res > 2.0f * CV_PI_F)
res -= 2.0f * CV_PI_F;
while (res < 0.0f)
res += 2.0f * CV_PI_F;
return res;
}
__device__ bool angleEq(float a, float b, float eps)
{
return (::fabs(clampAngle(a - b)) <= eps);
}
template <class FT, bool isTempl>
__global__ void GHT_Guil_Full_buildFeatureList(const unsigned int* coordList, const float* thetaList, const int pointsCount,
int* sizes, const int maxSize,
const float xi, const float angleEpsilon, const float alphaScale,
const float2 center, const float maxDist)
{
const float p1_theta = thetaList[blockIdx.x];
const unsigned int coord1 = coordList[blockIdx.x];
float2 p1_pos;
p1_pos.x = (coord1 & 0xFFFF);
p1_pos.y = (coord1 >> 16) & 0xFFFF;
for (int i = threadIdx.x; i < pointsCount; i += blockDim.x)
{
const float p2_theta = thetaList[i];
const unsigned int coord2 = coordList[i];
float2 p2_pos;
p2_pos.x = (coord2 & 0xFFFF);
p2_pos.y = (coord2 >> 16) & 0xFFFF;
if (angleEq(p1_theta - p2_theta, xi, angleEpsilon))
{
const float2 d = p1_pos - p2_pos;
float alpha12 = clampAngle(::atan2(d.y, d.x) - p1_theta);
float d12 = ::sqrtf(d.x * d.x + d.y * d.y);
if (d12 > maxDist)
continue;
float2 r1 = p1_pos - center;
float2 r2 = p2_pos - center;
const int n = __float2int_rn(alpha12 * alphaScale);
const int ind = ::atomicAdd(sizes + n, 1);
if (ind < maxSize)
{
if (!isTempl)
{
FT::p1_pos(n)[ind] = p1_pos;
FT::p2_pos(n)[ind] = p2_pos;
}
FT::p1_theta(n)[ind] = p1_theta;
FT::d12(n)[ind] = d12;
if (isTempl)
{
FT::r1(n)[ind] = r1;
FT::r2(n)[ind] = r2;
}
}
}
}
}
template <class FT, bool isTempl>
void GHT_Guil_Full_buildFeatureList_caller(const unsigned int* coordList, const float* thetaList, int pointsCount,
int* sizes, int maxSize,
float xi, float angleEpsilon, int levels,
float2 center, float maxDist)
{
const dim3 block(256);
const dim3 grid(pointsCount);
const float alphaScale = levels / (2.0f * CV_PI_F);
hipLaunchKernelGGL(( GHT_Guil_Full_buildFeatureList<FT, isTempl>), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount,
sizes, maxSize,
xi * (CV_PI_F / 180.0f), angleEpsilon * (CV_PI_F / 180.0f), alphaScale,
center, maxDist);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
thrust::device_ptr<int> sizesPtr(sizes);
thrust::transform(sizesPtr, sizesPtr + levels + 1, sizesPtr, device::bind2nd(device::minimum<int>(), maxSize));
}
void GHT_Guil_Full_buildTemplFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
int* sizes, int maxSize,
float xi, float angleEpsilon, int levels,
float2 center, float maxDist)
{
GHT_Guil_Full_buildFeatureList_caller<TemplFeatureTable, true>(coordList, thetaList, pointsCount,
sizes, maxSize,
xi, angleEpsilon, levels,
center, maxDist);
}
void GHT_Guil_Full_buildImageFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
int* sizes, int maxSize,
float xi, float angleEpsilon, int levels,
float2 center, float maxDist)
{
GHT_Guil_Full_buildFeatureList_caller<ImageFeatureTable, false>(coordList, thetaList, pointsCount,
sizes, maxSize,
xi, angleEpsilon, levels,
center, maxDist);
}
__global__ void GHT_Guil_Full_calcOHist(const int* templSizes, const int* imageSizes, int* OHist,
const float minAngle, const float maxAngle, const float iAngleStep, const int angleRange)
{
extern __shared__ int s_OHist[];
for (int i = threadIdx.x; i <= angleRange; i += blockDim.x)
s_OHist[i] = 0;
__syncthreads();
const int tIdx = blockIdx.x;
const int level = blockIdx.y;
const int tSize = templSizes[level];
if (tIdx < tSize)
{
const int imSize = imageSizes[level];
const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx];
for (int i = threadIdx.x; i < imSize; i += blockDim.x)
{
const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
const float angle = clampAngle(im_p1_theta - t_p1_theta);
if (angle >= minAngle && angle <= maxAngle)
{
const int n = __float2int_rn((angle - minAngle) * iAngleStep);
Emulation::smem::atomicAdd(&s_OHist[n], 1);
}
}
}
__syncthreads();
for (int i = threadIdx.x; i <= angleRange; i += blockDim.x)
::atomicAdd(OHist + i, s_OHist[i]);
}
void GHT_Guil_Full_calcOHist_gpu(const int* templSizes, const int* imageSizes, int* OHist,
float minAngle, float maxAngle, float angleStep, int angleRange,
int levels, int tMaxSize)
{
const dim3 block(256);
const dim3 grid(tMaxSize, levels + 1);
minAngle *= (CV_PI_F / 180.0f);
maxAngle *= (CV_PI_F / 180.0f);
angleStep *= (CV_PI_F / 180.0f);
const size_t smemSize = (angleRange + 1) * sizeof(float);
hipLaunchKernelGGL(( GHT_Guil_Full_calcOHist), dim3(grid), dim3(block), smemSize, 0, templSizes, imageSizes, OHist,
minAngle, maxAngle, 1.0f / angleStep, angleRange);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void GHT_Guil_Full_calcSHist(const int* templSizes, const int* imageSizes, int* SHist,
const float angle, const float angleEpsilon,
const float minScale, const float maxScale, const float iScaleStep, const int scaleRange)
{
extern __shared__ int s_SHist[];
for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x)
s_SHist[i] = 0;
__syncthreads();
const int tIdx = blockIdx.x;
const int level = blockIdx.y;
const int tSize = templSizes[level];
if (tIdx < tSize)
{
const int imSize = imageSizes[level];
const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle;
const float t_d12 = TemplFeatureTable::d12(level)[tIdx] + angle;
for (int i = threadIdx.x; i < imSize; i += blockDim.x)
{
const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
const float im_d12 = ImageFeatureTable::d12(level)[i];
if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon))
{
const float scale = im_d12 / t_d12;
if (scale >= minScale && scale <= maxScale)
{
const int s = __float2int_rn((scale - minScale) * iScaleStep);
Emulation::smem::atomicAdd(&s_SHist[s], 1);
}
}
}
}
__syncthreads();
for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x)
::atomicAdd(SHist + i, s_SHist[i]);
}
void GHT_Guil_Full_calcSHist_gpu(const int* templSizes, const int* imageSizes, int* SHist,
float angle, float angleEpsilon,
float minScale, float maxScale, float iScaleStep, int scaleRange,
int levels, int tMaxSize)
{
const dim3 block(256);
const dim3 grid(tMaxSize, levels + 1);
angle *= (CV_PI_F / 180.0f);
angleEpsilon *= (CV_PI_F / 180.0f);
const size_t smemSize = (scaleRange + 1) * sizeof(float);
hipLaunchKernelGGL(( GHT_Guil_Full_calcSHist), dim3(grid), dim3(block), smemSize, 0, templSizes, imageSizes, SHist,
angle, angleEpsilon,
minScale, maxScale, iScaleStep, scaleRange);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void GHT_Guil_Full_calcPHist(const int* templSizes, const int* imageSizes, PtrStepSzi PHist,
const float angle, const float sinVal, const float cosVal, const float angleEpsilon, const float scale,
const float idp)
{
const int tIdx = blockIdx.x;
const int level = blockIdx.y;
const int tSize = templSizes[level];
if (tIdx < tSize)
{
const int imSize = imageSizes[level];
const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle;
float2 r1 = TemplFeatureTable::r1(level)[tIdx];
float2 r2 = TemplFeatureTable::r2(level)[tIdx];
r1 = r1 * scale;
r2 = r2 * scale;
r1 = make_float2(cosVal * r1.x - sinVal * r1.y, sinVal * r1.x + cosVal * r1.y);
r2 = make_float2(cosVal * r2.x - sinVal * r2.y, sinVal * r2.x + cosVal * r2.y);
for (int i = threadIdx.x; i < imSize; i += blockDim.x)
{
const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
const float2 im_p1_pos = ImageFeatureTable::p1_pos(level)[i];
const float2 im_p2_pos = ImageFeatureTable::p2_pos(level)[i];
if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon))
{
float2 c1, c2;
c1 = im_p1_pos - r1;
c1 = c1 * idp;
c2 = im_p2_pos - r2;
c2 = c2 * idp;
if (::fabs(c1.x - c2.x) > 1 || ::fabs(c1.y - c2.y) > 1)
continue;
if (c1.y >= 0 && c1.y < PHist.rows - 2 && c1.x >= 0 && c1.x < PHist.cols - 2)
::atomicAdd(PHist.ptr(__float2int_rn(c1.y) + 1) + __float2int_rn(c1.x) + 1, 1);
}
}
}
}
void GHT_Guil_Full_calcPHist_gpu(const int* templSizes, const int* imageSizes, PtrStepSzi PHist,
float angle, float angleEpsilon, float scale,
float dp,
int levels, int tMaxSize)
{
const dim3 block(256);
const dim3 grid(tMaxSize, levels + 1);
angle *= (CV_PI_F / 180.0f);
angleEpsilon *= (CV_PI_F / 180.0f);
const float sinVal = ::sinf(angle);
const float cosVal = ::cosf(angle);
cudaSafeCall( hipFuncSetCacheConfig(GHT_Guil_Full_calcPHist, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( GHT_Guil_Full_calcPHist), dim3(grid), dim3(block), 0, 0, templSizes, imageSizes, PHist,
angle, sinVal, cosVal, angleEpsilon, scale,
1.0f / dp);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void GHT_Guil_Full_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize,
const float angle, const int angleVotes, const float scale, const int scaleVotes,
const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= hist.cols - 2 || y >= hist.rows - 2)
return;
const int curVotes = hist(y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(y + 1, x) &&
curVotes >= hist(y + 1, x + 2) &&
curVotes > hist(y, x + 1) &&
curVotes >= hist(y + 2, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, scale, angle);
votes[ind] = make_int3(curVotes, scaleVotes, angleVotes);
}
}
}
int GHT_Guil_Full_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int curSize, int maxSize,
float angle, int angleVotes, float scale, int scaleVotes,
float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( hipMemcpy(counterPtr, &curSize, sizeof(int), hipMemcpyHostToDevice) );
const dim3 block(32, 8);
const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y));
cudaSafeCall( hipFuncSetCacheConfig(GHT_Guil_Full_findPosInHist, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( GHT_Guil_Full_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, out, votes, maxSize,
angle, angleVotes, scale, scaleVotes,
dp, threshold);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int totalCount;
cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
|
f768e51f9df28d61c6f5b1e3d19d9c5c78cf7fbc.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#define CUDA_DISABLER
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/functional.hpp"
namespace cv { namespace gpu { namespace device
{
namespace hough
{
__device__ static int g_counter;
template <typename T, int PIXELS_PER_THREAD>
__global__ void buildEdgePointList(const PtrStepSzb edges, const PtrStep<T> dx, const PtrStep<T> dy, unsigned int* coordList, float* thetaList)
{
__shared__ unsigned int s_coordLists[4][32 * PIXELS_PER_THREAD];
__shared__ float s_thetaLists[4][32 * PIXELS_PER_THREAD];
__shared__ int s_sizes[4];
__shared__ int s_globStart[4];
const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (threadIdx.x == 0)
s_sizes[threadIdx.y] = 0;
__syncthreads();
if (y < edges.rows)
{
// fill the queue
const uchar* edgesRow = edges.ptr(y);
const T* dxRow = dx.ptr(y);
const T* dyRow = dy.ptr(y);
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < edges.cols; ++i, xx += blockDim.x)
{
const T dxVal = dxRow[xx];
const T dyVal = dyRow[xx];
if (edgesRow[xx] && (dxVal != 0 || dyVal != 0))
{
const unsigned int coord = (y << 16) | xx;
float theta = ::atan2f(dyVal, dxVal);
if (theta < 0)
theta += 2.0f * CV_PI_F;
const int qidx = Emulation::smem::atomicAdd(&s_sizes[threadIdx.y], 1);
s_coordLists[threadIdx.y][qidx] = coord;
s_thetaLists[threadIdx.y][qidx] = theta;
}
}
}
__syncthreads();
// let one thread reserve the space required in the global list
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// find how many items are stored in each list
int totalSize = 0;
for (int i = 0; i < blockDim.y; ++i)
{
s_globStart[i] = totalSize;
totalSize += s_sizes[i];
}
// calculate the offset in the global list
const int globalOffset = atomicAdd(&g_counter, totalSize);
for (int i = 0; i < blockDim.y; ++i)
s_globStart[i] += globalOffset;
}
__syncthreads();
// copy local queues to global queue
const int qsize = s_sizes[threadIdx.y];
int gidx = s_globStart[threadIdx.y] + threadIdx.x;
for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x)
{
coordList[gidx] = s_coordLists[threadIdx.y][i];
thetaList[gidx] = s_thetaLists[threadIdx.y][i];
}
}
template <typename T>
int buildEdgePointList_gpu(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList)
{
const int PIXELS_PER_THREAD = 8;
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 4);
const dim3 grid(divUp(edges.cols, block.x * PIXELS_PER_THREAD), divUp(edges.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(buildEdgePointList<T, PIXELS_PER_THREAD>, cudaFuncCachePreferShared) );
buildEdgePointList<T, PIXELS_PER_THREAD><<<grid, block>>>(edges, (PtrStepSz<T>) dx, (PtrStepSz<T>) dy, coordList, thetaList);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
return totalCount;
}
template int buildEdgePointList_gpu<short>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList);
template int buildEdgePointList_gpu<int>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList);
template int buildEdgePointList_gpu<float>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList);
__global__ void buildRTable(const unsigned int* coordList, const float* thetaList, const int pointsCount,
PtrStep<short2> r_table, int* r_sizes, int maxSize,
const short2 templCenter, const float thetaScale)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= pointsCount)
return;
const unsigned int coord = coordList[tid];
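// coordList entries pack each edge pixel as (y << 16) | x (see buildEdgePointList); unpack x from the low and y from the high 16 bits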
short2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float theta = thetaList[tid];
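// quantize the gradient direction (0..2*pi) into one of the R-table's 'levels' rows (thetaScale = levels / 2*pi)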
const int n = __float2int_rn(theta * thetaScale);
const int ind = ::atomicAdd(r_sizes + n, 1);
if (ind < maxSize)
r_table(n, ind) = saturate_cast<short2>(p - templCenter);
}
void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, int* r_sizes,
short2 templCenter, int levels)
{
const dim3 block(256);
const dim3 grid(divUp(pointsCount, block.x));
const float thetaScale = levels / (2.0f * CV_PI_F);
buildRTable<<<grid, block>>>(coordList, thetaList, pointsCount, r_table, r_sizes, r_table.cols, templCenter, thetaScale);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// GHT_Ballard_Pos
__global__ void GHT_Ballard_Pos_calcHist(const unsigned int* coordList, const float* thetaList, const int pointsCount,
const PtrStep<short2> r_table, const int* r_sizes,
PtrStepSzi hist,
const float idp, const float thetaScale)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= pointsCount)
return;
const unsigned int coord = coordList[tid];
short2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float theta = thetaList[tid];
const int n = __float2int_rn(theta * thetaScale);
const short2* r_row = r_table.ptr(n);
const int r_row_size = r_sizes[n];
for (int j = 0; j < r_row_size; ++j)
{
int2 c = p - r_row[j];
c.x = __float2int_rn(c.x * idp);
c.y = __float2int_rn(c.y * idp);
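// hist carries a one-cell border on every side, so votes go to (c.y + 1, c.x + 1) and the peak search can read all four neighbours without bounds checks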
if (c.x >= 0 && c.x < hist.cols - 2 && c.y >= 0 && c.y < hist.rows - 2)
::atomicAdd(hist.ptr(c.y + 1) + c.x + 1, 1);
}
}
void GHT_Ballard_Pos_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, const int* r_sizes,
PtrStepSzi hist,
float dp, int levels)
{
const dim3 block(256);
const dim3 grid(divUp(pointsCount, block.x));
const float idp = 1.0f / dp;
const float thetaScale = levels / (2.0f * CV_PI_F);
GHT_Ballard_Pos_calcHist<<<grid, block>>>(coordList, thetaList, pointsCount, r_table, r_sizes, hist, idp, thetaScale);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void GHT_Ballard_Pos_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= hist.cols - 2 || y >= hist.rows - 2)
return;
const int curVotes = hist(y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(y + 1, x) &&
curVotes >= hist(y + 1, x + 2) &&
curVotes > hist(y, x + 1) &&
curVotes >= hist(y + 2, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, 1.0f, 0.0f);
votes[ind] = make_int3(curVotes, 0, 0);
}
}
}
int GHT_Ballard_Pos_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int maxSize, float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(GHT_Ballard_Pos_findPosInHist, cudaFuncCachePreferL1) );
GHT_Ballard_Pos_findPosInHist<<<grid, block>>>(hist, out, votes, maxSize, dp, threshold);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// GHT_Ballard_PosScale
__global__ void GHT_Ballard_PosScale_calcHist(const unsigned int* coordList, const float* thetaList,
PtrStep<short2> r_table, const int* r_sizes,
PtrStepi hist, const int rows, const int cols,
const float minScale, const float scaleStep, const int scaleRange,
const float idp, const float thetaScale)
{
const unsigned int coord = coordList[blockIdx.x];
float2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float theta = thetaList[blockIdx.x];
const int n = __float2int_rn(theta * thetaScale);
const short2* r_row = r_table.ptr(n);
const int r_row_size = r_sizes[n];
for (int j = 0; j < r_row_size; ++j)
{
const float2 d = saturate_cast<float2>(r_row[j]);
for (int s = threadIdx.x; s < scaleRange; s += blockDim.x)
{
const float scale = minScale + s * scaleStep;
float2 c = p - scale * d;
c.x *= idp;
c.y *= idp;
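// the position/scale histogram is laid out as (scaleRange + 2) stacked slabs of (rows + 2) rows each; slab s + 1 holds the votes for scale index s, again with a one-cell border for the peak search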
if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows)
::atomicAdd(hist.ptr((s + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1);
}
}
}
void GHT_Ballard_PosScale_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, const int* r_sizes,
PtrStepi hist, int rows, int cols,
float minScale, float scaleStep, int scaleRange,
float dp, int levels)
{
const dim3 block(256);
const dim3 grid(pointsCount);
const float idp = 1.0f / dp;
const float thetaScale = levels / (2.0f * CV_PI_F);
GHT_Ballard_PosScale_calcHist<<<grid, block>>>(coordList, thetaList,
r_table, r_sizes,
hist, rows, cols,
minScale, scaleStep, scaleRange,
idp, thetaScale);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void GHT_Ballard_PosScale_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int scaleRange,
float4* out, int3* votes, const int maxSize,
const float minScale, const float scaleStep, const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
for (int s = 0; s < scaleRange; ++s)
{
const float scale = minScale + s * scaleStep;
const int prevScaleIdx = (s) * (rows + 2);
const int curScaleIdx = (s + 1) * (rows + 2);
const int nextScaleIdx = (s + 2) * (rows + 2);
const int curVotes = hist(curScaleIdx + y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(curScaleIdx + y + 1, x) &&
curVotes >= hist(curScaleIdx + y + 1, x + 2) &&
curVotes > hist(curScaleIdx + y, x + 1) &&
curVotes >= hist(curScaleIdx + y + 2, x + 1) &&
curVotes > hist(prevScaleIdx + y + 1, x + 1) &&
curVotes >= hist(nextScaleIdx + y + 1, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, scale, 0.0f);
votes[ind] = make_int3(curVotes, curVotes, 0);
}
}
}
}
int GHT_Ballard_PosScale_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int scaleRange, float4* out, int3* votes, int maxSize,
float minScale, float scaleStep, float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(GHT_Ballard_PosScale_findPosInHist, cudaFuncCachePreferL1) );
GHT_Ballard_PosScale_findPosInHist<<<grid, block>>>(hist, rows, cols, scaleRange, out, votes, maxSize, minScale, scaleStep, dp, threshold);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// GHT_Ballard_PosRotation
__global__ void GHT_Ballard_PosRotation_calcHist(const unsigned int* coordList, const float* thetaList,
PtrStep<short2> r_table, const int* r_sizes,
PtrStepi hist, const int rows, const int cols,
const float minAngle, const float angleStep, const int angleRange,
const float idp, const float thetaScale)
{
const unsigned int coord = coordList[blockIdx.x];
float2 p;
p.x = (coord & 0xFFFF);
p.y = (coord >> 16) & 0xFFFF;
const float thetaVal = thetaList[blockIdx.x];
for (int a = threadIdx.x; a < angleRange; a += blockDim.x)
{
const float angle = (minAngle + a * angleStep) * (CV_PI_F / 180.0f);
float sinA, cosA;
sincosf(angle, &sinA, &cosA);
float theta = thetaVal - angle;
if (theta < 0)
theta += 2.0f * CV_PI_F;
const int n = __float2int_rn(theta * thetaScale);
const short2* r_row = r_table.ptr(n);
const int r_row_size = r_sizes[n];
for (int j = 0; j < r_row_size; ++j)
{
const float2 d = saturate_cast<float2>(r_row[j]);
const float2 dr = make_float2(d.x * cosA - d.y * sinA, d.x * sinA + d.y * cosA);
float2 c = make_float2(p.x - dr.x, p.y - dr.y);
c.x *= idp;
c.y *= idp;
if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows)
::atomicAdd(hist.ptr((a + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1);
}
}
}
void GHT_Ballard_PosRotation_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
PtrStepSz<short2> r_table, const int* r_sizes,
PtrStepi hist, int rows, int cols,
float minAngle, float angleStep, int angleRange,
float dp, int levels)
{
const dim3 block(256);
const dim3 grid(pointsCount);
const float idp = 1.0f / dp;
const float thetaScale = levels / (2.0f * CV_PI_F);
GHT_Ballard_PosRotation_calcHist<<<grid, block>>>(coordList, thetaList,
r_table, r_sizes,
hist, rows, cols,
minAngle, angleStep, angleRange,
idp, thetaScale);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void GHT_Ballard_PosRotation_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int angleRange,
float4* out, int3* votes, const int maxSize,
const float minAngle, const float angleStep, const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
for (int a = 0; a < angleRange; ++a)
{
const float angle = minAngle + a * angleStep;
const int prevAngleIdx = (a) * (rows + 2);
const int curAngleIdx = (a + 1) * (rows + 2);
const int nextAngleIdx = (a + 2) * (rows + 2);
const int curVotes = hist(curAngleIdx + y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(curAngleIdx + y + 1, x) &&
curVotes >= hist(curAngleIdx + y + 1, x + 2) &&
curVotes > hist(curAngleIdx + y, x + 1) &&
curVotes >= hist(curAngleIdx + y + 2, x + 1) &&
curVotes > hist(prevAngleIdx + y + 1, x + 1) &&
curVotes >= hist(nextAngleIdx + y + 1, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, 1.0f, angle);
votes[ind] = make_int3(curVotes, 0, curVotes);
}
}
}
}
int GHT_Ballard_PosRotation_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int angleRange, float4* out, int3* votes, int maxSize,
float minAngle, float angleStep, float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(GHT_Ballard_PosRotation_findPosInHist, cudaFuncCachePreferL1) );
GHT_Ballard_PosRotation_findPosInHist<<<grid, block>>>(hist, rows, cols, angleRange, out, votes, maxSize, minAngle, angleStep, dp, threshold);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// GHT_Guil_Full
struct FeatureTable
{
uchar* p1_pos_data;
size_t p1_pos_step;
uchar* p1_theta_data;
size_t p1_theta_step;
uchar* p2_pos_data;
size_t p2_pos_step;
uchar* d12_data;
size_t d12_step;
uchar* r1_data;
size_t r1_step;
uchar* r2_data;
size_t r2_step;
};
__constant__ FeatureTable c_templFeatures;
__constant__ FeatureTable c_imageFeatures;
void GHT_Guil_Full_setTemplFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2)
{
FeatureTable tbl;
tbl.p1_pos_data = p1_pos.data;
tbl.p1_pos_step = p1_pos.step;
tbl.p1_theta_data = p1_theta.data;
tbl.p1_theta_step = p1_theta.step;
tbl.p2_pos_data = p2_pos.data;
tbl.p2_pos_step = p2_pos.step;
tbl.d12_data = d12.data;
tbl.d12_step = d12.step;
tbl.r1_data = r1.data;
tbl.r1_step = r1.step;
tbl.r2_data = r2.data;
tbl.r2_step = r2.step;
cudaSafeCall( cudaMemcpyToSymbol(c_templFeatures, &tbl, sizeof(FeatureTable)) );
}
void GHT_Guil_Full_setImageFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2)
{
FeatureTable tbl;
tbl.p1_pos_data = p1_pos.data;
tbl.p1_pos_step = p1_pos.step;
tbl.p1_theta_data = p1_theta.data;
tbl.p1_theta_step = p1_theta.step;
tbl.p2_pos_data = p2_pos.data;
tbl.p2_pos_step = p2_pos.step;
tbl.d12_data = d12.data;
tbl.d12_step = d12.step;
tbl.r1_data = r1.data;
tbl.r1_step = r1.step;
tbl.r2_data = r2.data;
tbl.r2_step = r2.step;
cudaSafeCall( cudaMemcpyToSymbol(c_imageFeatures, &tbl, sizeof(FeatureTable)) );
}
struct TemplFeatureTable
{
static __device__ float2* p1_pos(int n)
{
return (float2*)(c_templFeatures.p1_pos_data + n * c_templFeatures.p1_pos_step);
}
static __device__ float* p1_theta(int n)
{
return (float*)(c_templFeatures.p1_theta_data + n * c_templFeatures.p1_theta_step);
}
static __device__ float2* p2_pos(int n)
{
return (float2*)(c_templFeatures.p2_pos_data + n * c_templFeatures.p2_pos_step);
}
static __device__ float* d12(int n)
{
return (float*)(c_templFeatures.d12_data + n * c_templFeatures.d12_step);
}
static __device__ float2* r1(int n)
{
return (float2*)(c_templFeatures.r1_data + n * c_templFeatures.r1_step);
}
static __device__ float2* r2(int n)
{
return (float2*)(c_templFeatures.r2_data + n * c_templFeatures.r2_step);
}
};
struct ImageFeatureTable
{
static __device__ float2* p1_pos(int n)
{
return (float2*)(c_imageFeatures.p1_pos_data + n * c_imageFeatures.p1_pos_step);
}
static __device__ float* p1_theta(int n)
{
return (float*)(c_imageFeatures.p1_theta_data + n * c_imageFeatures.p1_theta_step);
}
static __device__ float2* p2_pos(int n)
{
return (float2*)(c_imageFeatures.p2_pos_data + n * c_imageFeatures.p2_pos_step);
}
static __device__ float* d12(int n)
{
return (float*)(c_imageFeatures.d12_data + n * c_imageFeatures.d12_step);
}
static __device__ float2* r1(int n)
{
return (float2*)(c_imageFeatures.r1_data + n * c_imageFeatures.r1_step);
}
static __device__ float2* r2(int n)
{
return (float2*)(c_imageFeatures.r2_data + n * c_imageFeatures.r2_step);
}
};
__device__ float clampAngle(float a)
{
float res = a;
while (res > 2.0f * CV_PI_F)
res -= 2.0f * CV_PI_F;
while (res < 0.0f)
res += 2.0f * CV_PI_F;
return res;
}
__device__ bool angleEq(float a, float b, float eps)
{
return (::fabs(clampAngle(a - b)) <= eps);
}
template <class FT, bool isTempl>
__global__ void GHT_Guil_Full_buildFeatureList(const unsigned int* coordList, const float* thetaList, const int pointsCount,
int* sizes, const int maxSize,
const float xi, const float angleEpsilon, const float alphaScale,
const float2 center, const float maxDist)
{
const float p1_theta = thetaList[blockIdx.x];
const unsigned int coord1 = coordList[blockIdx.x];
float2 p1_pos;
p1_pos.x = (coord1 & 0xFFFF);
p1_pos.y = (coord1 >> 16) & 0xFFFF;
for (int i = threadIdx.x; i < pointsCount; i += blockDim.x)
{
const float p2_theta = thetaList[i];
const unsigned int coord2 = coordList[i];
float2 p2_pos;
p2_pos.x = (coord2 & 0xFFFF);
p2_pos.y = (coord2 >> 16) & 0xFFFF;
if (angleEq(p1_theta - p2_theta, xi, angleEpsilon))
{
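// p1/p2 form a feature pair when their gradient directions differ by roughly xi; the pair is binned by the quantized angle alpha12 between the segment p1->p2 and p1's gradient direction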
const float2 d = p1_pos - p2_pos;
float alpha12 = clampAngle(::atan2(d.y, d.x) - p1_theta);
float d12 = ::sqrtf(d.x * d.x + d.y * d.y);
if (d12 > maxDist)
continue;
float2 r1 = p1_pos - center;
float2 r2 = p2_pos - center;
const int n = __float2int_rn(alpha12 * alphaScale);
const int ind = ::atomicAdd(sizes + n, 1);
if (ind < maxSize)
{
if (!isTempl)
{
FT::p1_pos(n)[ind] = p1_pos;
FT::p2_pos(n)[ind] = p2_pos;
}
FT::p1_theta(n)[ind] = p1_theta;
FT::d12(n)[ind] = d12;
if (isTempl)
{
FT::r1(n)[ind] = r1;
FT::r2(n)[ind] = r2;
}
}
}
}
}
template <class FT, bool isTempl>
void GHT_Guil_Full_buildFeatureList_caller(const unsigned int* coordList, const float* thetaList, int pointsCount,
int* sizes, int maxSize,
float xi, float angleEpsilon, int levels,
float2 center, float maxDist)
{
const dim3 block(256);
const dim3 grid(pointsCount);
const float alphaScale = levels / (2.0f * CV_PI_F);
GHT_Guil_Full_buildFeatureList<FT, isTempl><<<grid, block>>>(coordList, thetaList, pointsCount,
sizes, maxSize,
xi * (CV_PI_F / 180.0f), angleEpsilon * (CV_PI_F / 180.0f), alphaScale,
center, maxDist);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
thrust::device_ptr<int> sizesPtr(sizes);
thrust::transform(sizesPtr, sizesPtr + levels + 1, sizesPtr, device::bind2nd(device::minimum<int>(), maxSize));
}
void GHT_Guil_Full_buildTemplFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
int* sizes, int maxSize,
float xi, float angleEpsilon, int levels,
float2 center, float maxDist)
{
GHT_Guil_Full_buildFeatureList_caller<TemplFeatureTable, true>(coordList, thetaList, pointsCount,
sizes, maxSize,
xi, angleEpsilon, levels,
center, maxDist);
}
void GHT_Guil_Full_buildImageFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
int* sizes, int maxSize,
float xi, float angleEpsilon, int levels,
float2 center, float maxDist)
{
GHT_Guil_Full_buildFeatureList_caller<ImageFeatureTable, false>(coordList, thetaList, pointsCount,
sizes, maxSize,
xi, angleEpsilon, levels,
center, maxDist);
}
__global__ void GHT_Guil_Full_calcOHist(const int* templSizes, const int* imageSizes, int* OHist,
const float minAngle, const float maxAngle, const float iAngleStep, const int angleRange)
{
extern __shared__ int s_OHist[];
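// each block accumulates a private orientation histogram in shared memory and merges it into the global histogram with atomics at the end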
for (int i = threadIdx.x; i <= angleRange; i += blockDim.x)
s_OHist[i] = 0;
__syncthreads();
const int tIdx = blockIdx.x;
const int level = blockIdx.y;
const int tSize = templSizes[level];
if (tIdx < tSize)
{
const int imSize = imageSizes[level];
const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx];
for (int i = threadIdx.x; i < imSize; i += blockDim.x)
{
const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
const float angle = clampAngle(im_p1_theta - t_p1_theta);
if (angle >= minAngle && angle <= maxAngle)
{
const int n = __float2int_rn((angle - minAngle) * iAngleStep);
Emulation::smem::atomicAdd(&s_OHist[n], 1);
}
}
}
__syncthreads();
for (int i = threadIdx.x; i <= angleRange; i += blockDim.x)
::atomicAdd(OHist + i, s_OHist[i]);
}
void GHT_Guil_Full_calcOHist_gpu(const int* templSizes, const int* imageSizes, int* OHist,
float minAngle, float maxAngle, float angleStep, int angleRange,
int levels, int tMaxSize)
{
const dim3 block(256);
const dim3 grid(tMaxSize, levels + 1);
minAngle *= (CV_PI_F / 180.0f);
maxAngle *= (CV_PI_F / 180.0f);
angleStep *= (CV_PI_F / 180.0f);
const size_t smemSize = (angleRange + 1) * sizeof(float);
GHT_Guil_Full_calcOHist<<<grid, block, smemSize>>>(templSizes, imageSizes, OHist,
minAngle, maxAngle, 1.0f / angleStep, angleRange);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void GHT_Guil_Full_calcSHist(const int* templSizes, const int* imageSizes, int* SHist,
const float angle, const float angleEpsilon,
const float minScale, const float maxScale, const float iScaleStep, const int scaleRange)
{
extern __shared__ int s_SHist[];
for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x)
s_SHist[i] = 0;
__syncthreads();
const int tIdx = blockIdx.x;
const int level = blockIdx.y;
const int tSize = templSizes[level];
if (tIdx < tSize)
{
const int imSize = imageSizes[level];
const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle;
const float t_d12 = TemplFeatureTable::d12(level)[tIdx] + angle;
for (int i = threadIdx.x; i < imSize; i += blockDim.x)
{
const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
const float im_d12 = ImageFeatureTable::d12(level)[i];
if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon))
{
const float scale = im_d12 / t_d12;
if (scale >= minScale && scale <= maxScale)
{
const int s = __float2int_rn((scale - minScale) * iScaleStep);
Emulation::smem::atomicAdd(&s_SHist[s], 1);
}
}
}
}
__syncthreads();
for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x)
::atomicAdd(SHist + i, s_SHist[i]);
}
void GHT_Guil_Full_calcSHist_gpu(const int* templSizes, const int* imageSizes, int* SHist,
float angle, float angleEpsilon,
float minScale, float maxScale, float iScaleStep, int scaleRange,
int levels, int tMaxSize)
{
const dim3 block(256);
const dim3 grid(tMaxSize, levels + 1);
angle *= (CV_PI_F / 180.0f);
angleEpsilon *= (CV_PI_F / 180.0f);
const size_t smemSize = (scaleRange + 1) * sizeof(float);
GHT_Guil_Full_calcSHist<<<grid, block, smemSize>>>(templSizes, imageSizes, SHist,
angle, angleEpsilon,
minScale, maxScale, iScaleStep, scaleRange);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void GHT_Guil_Full_calcPHist(const int* templSizes, const int* imageSizes, PtrStepSzi PHist,
const float angle, const float sinVal, const float cosVal, const float angleEpsilon, const float scale,
const float idp)
{
const int tIdx = blockIdx.x;
const int level = blockIdx.y;
const int tSize = templSizes[level];
if (tIdx < tSize)
{
const int imSize = imageSizes[level];
const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle;
float2 r1 = TemplFeatureTable::r1(level)[tIdx];
float2 r2 = TemplFeatureTable::r2(level)[tIdx];
r1 = r1 * scale;
r2 = r2 * scale;
r1 = make_float2(cosVal * r1.x - sinVal * r1.y, sinVal * r1.x + cosVal * r1.y);
r2 = make_float2(cosVal * r2.x - sinVal * r2.y, sinVal * r2.x + cosVal * r2.y);
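// r1/r2 are the template displacements from the reference point, scaled and rotated by the candidate pose; an image pair votes only when both of its points predict (almost) the same centre, i.e. c1 ~ c2 below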
for (int i = threadIdx.x; i < imSize; i += blockDim.x)
{
const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
const float2 im_p1_pos = ImageFeatureTable::p1_pos(level)[i];
const float2 im_p2_pos = ImageFeatureTable::p2_pos(level)[i];
if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon))
{
float2 c1, c2;
c1 = im_p1_pos - r1;
c1 = c1 * idp;
c2 = im_p2_pos - r2;
c2 = c2 * idp;
if (::fabs(c1.x - c2.x) > 1 || ::fabs(c1.y - c2.y) > 1)
continue;
if (c1.y >= 0 && c1.y < PHist.rows - 2 && c1.x >= 0 && c1.x < PHist.cols - 2)
::atomicAdd(PHist.ptr(__float2int_rn(c1.y) + 1) + __float2int_rn(c1.x) + 1, 1);
}
}
}
}
void GHT_Guil_Full_calcPHist_gpu(const int* templSizes, const int* imageSizes, PtrStepSzi PHist,
float angle, float angleEpsilon, float scale,
float dp,
int levels, int tMaxSize)
{
const dim3 block(256);
const dim3 grid(tMaxSize, levels + 1);
angle *= (CV_PI_F / 180.0f);
angleEpsilon *= (CV_PI_F / 180.0f);
const float sinVal = ::sinf(angle);
const float cosVal = ::cosf(angle);
cudaSafeCall( cudaFuncSetCacheConfig(GHT_Guil_Full_calcPHist, cudaFuncCachePreferL1) );
GHT_Guil_Full_calcPHist<<<grid, block>>>(templSizes, imageSizes, PHist,
angle, sinVal, cosVal, angleEpsilon, scale,
1.0f / dp);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void GHT_Guil_Full_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize,
const float angle, const int angleVotes, const float scale, const int scaleVotes,
const float dp, const int threshold)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= hist.cols - 2 || y >= hist.rows - 2)
return;
const int curVotes = hist(y + 1, x + 1);
if (curVotes > threshold &&
curVotes > hist(y + 1, x) &&
curVotes >= hist(y + 1, x + 2) &&
curVotes > hist(y, x + 1) &&
curVotes >= hist(y + 2, x + 1))
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float4(x * dp, y * dp, scale, angle);
votes[ind] = make_int3(curVotes, scaleVotes, angleVotes);
}
}
}
int GHT_Guil_Full_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int curSize, int maxSize,
float angle, int angleVotes, float scale, int scaleVotes,
float dp, int threshold)
{
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemcpy(counterPtr, &curSize, sizeof(int), cudaMemcpyHostToDevice) );
const dim3 block(32, 8);
const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(GHT_Guil_Full_findPosInHist, cudaFuncCachePreferL1) );
GHT_Guil_Full_findPosInHist<<<grid, block>>>(hist, out, votes, maxSize,
angle, angleVotes, scale, scaleVotes,
dp, threshold);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
|
ec8b4bf9cbbef69ba26ff41f17802dd30e9b569c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hipblas.h> // hipblas.h declares the hipblas* calls and HIPBLAS_OP_N used below; rocblas.h does not
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <iostream>
#include "cuda_multi_gemm_unif.cu"
#include <ctime> //for time measurement
//To build the CUDA original: nvcc testGemm.cu -lcublas -o testGemm. This HIP port is expected to build with hipcc and -lhipblas instead.
//Print matrix A(nr_rows_A, nr_cols_A) storage in column-major format
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
int m = 0;
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
std::cout << A[j * nr_rows_A + i] << " ";
//std::cout << A[m] <<" ";
m++;
}
std::cout << std::endl;
}
std::cout << std::endl;
}
int main() {
// Allocate 3 arrays on CPU
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C, nr_rows_D, nr_cols_D, nr_rows_E, nr_cols_E;
// for simplicity we are going to use square arrays
int m = 12, k=6, n = 6, nelt = 100;
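// Benchmark: C(m, n*nelt) = A(m,k) * B(k, n*nelt), then a batched E_e(m,m) = C_e(m,n) * D(n,m) for each of the nelt elements, first through hipBLAS and then through the hand-written cuda_multi_gemm_unif kernel.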
nr_rows_A = m;
nr_cols_A = k;
nr_rows_B = k;
nr_cols_B = n*nelt;
nr_rows_C = m;
nr_cols_C = n*nelt;
nr_rows_D = n;
nr_cols_D = m;
nr_rows_E = m;
nr_cols_E = m*nelt;
float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float));
float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float));
float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float));
float *h_D = (float *)malloc(nr_rows_D * nr_cols_D * sizeof(float));
float *h_E = (float *)malloc(nr_rows_E * nr_cols_E * sizeof(float));
// Allocate 3 arrays on GPU
float *d_A, *d_B, *d_C, *d_D, *d_E;
hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float));
hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float));
hipMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float));
hipMalloc(&d_D,nr_rows_D * nr_cols_D * sizeof(float));
hipMalloc(&d_E,nr_rows_E * nr_cols_E * sizeof(float));
// Fill the arrays A, B and D on CPU
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
h_A[i * nr_cols_A + j]= (i * nr_cols_A + j) ;
}
}
for(int i = 0; i < nr_rows_B; ++i){
for(int j = 0; j < nr_cols_B; ++j){
h_B[i * nr_cols_B + j]= (0.01*(i * nr_cols_B + j)) ;
}
}
for(int i = 0; i < nr_rows_D; ++i){
for(int j = 0; j < nr_cols_D; ++j){
h_D[i * nr_cols_D + j]= (0.001*(i * nr_cols_D + j + 10)) ;
}
}
// Optionally we can copy the data to GPU and print the arrays
hipMemcpy(d_A,h_A,nr_rows_A * nr_cols_A * sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,nr_rows_B * nr_cols_B * sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_D,h_D,nr_rows_D * nr_cols_D * sizeof(float),hipMemcpyHostToDevice);
/*std::cout << "A =" << std::endl;
print_matrix(h_A, nr_rows_A, nr_cols_A);
std::cout << "B =" << std::endl;
print_matrix(h_B, nr_rows_B, nr_cols_B);
std::cout << "D =" << std::endl;
print_matrix(h_D, nr_rows_D, nr_cols_D);*/
// Multiply A and B on GPU using hipblasSgemm
//measure time
clock_t begin = clock();
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
hipblasHandle_t handle;
hipblasCreate(&handle);
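// time 10000 iterations of the hipBLAS path (one Sgemm plus one strided-batched Sgemm per iteration)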
for(int i = 0; i<10000; i++){
hipMemset(d_C, 0, m*n*nelt * sizeof(float)); // hipMemset takes a byte count, not an element count
hipMemset(d_E, 0, m*m*nelt * sizeof(float)); // d_E holds m*m*nelt floats
// A(m,k) * B(k,n*nelt) = C(m,n*nelt)
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n*nelt, k, alpha, d_A, m, d_B, k, beta, d_C,m);
        // C(m,n)*nelt * D(n,m) = E(m,m)*nelt
hipblasSgemmStridedBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, m, n, alpha, d_C, m, m*n, d_D, n, 0, beta, d_E,m, m*m, nelt);
}
hipblasDestroy(handle);
clock_t end = clock();
double elapsed_sec = double(end-begin)/CLOCKS_PER_SEC;
std::cout << "hipblasSgemm time " << elapsed_sec << ' ' << end <<' ' << begin << std::endl;
std::cout << "hipblasSgemm result" << std::endl;
// Copy (and print) the result on host memory
hipMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(h_E,d_E,nr_rows_E * nr_cols_E * sizeof(float),hipMemcpyDeviceToHost);
/*std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
std::cout << "E =" << std::endl;
print_matrix(h_E, nr_rows_E, nr_cols_E);*/
//start gemm test
//measure time
clock_t begin2 = clock();
hipStream_t stream;
hipStreamCreate( &stream );
const float alpha2 = 1;
const float beta2 = 0;
int blockSize = 2, gridSize;
for(int i = 0; i< 10000; i++){
gridSize = (int)ceil((float)m*n*nelt/blockSize);
        hipMemset(d_C, 0, m*n*nelt*sizeof(float));
        hipMemset(d_E, 0, m*m*nelt*sizeof(float));
cuda_multi_gemm_unif(stream,'N', 'N', m, n, k, &alpha2, d_A, m, 0, d_B, k, k*n, &beta2, d_C, m, m*n, nelt, gridSize);
gridSize = (int)ceil((float)m*m*nelt/blockSize);
cuda_multi_gemm_unif(stream,'N', 'N', m, m, n, &alpha2, d_C, m, m*n, d_D, n, 0, &beta2, d_E, m, m*m, nelt, gridSize);
}
clock_t end2 = clock();
double elapsed_sec2 = double(end2-begin2)/CLOCKS_PER_SEC;
std::cout << "cuda_multi_gemm_unif time " << elapsed_sec2 << ' ' << end2 <<' ' << begin2 << std::endl;
std::cout << "cuda_multi_gemm_unif result" << std::endl;
hipMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost);
/*std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
hipMemcpy(h_E,d_E,nr_rows_E * nr_cols_E * sizeof(float),hipMemcpyDeviceToHost);
std::cout << "E =" << std::endl;
print_matrix(h_E, nr_rows_E, nr_cols_E);*/
/* hipMemset(d_C, 0.0, nr_rows_A*nr_cols_B);
cuda_multi_gemm_unif(stream,'T', 'T', nr_rows_A, nr_cols_B, nr_cols_A, &alpha2, d_A, nr_cols_A, nr_rows_A*nr_cols_A, d_B, nr_cols_B, nr_rows_B*nr_cols_B, &beta2, d_C, nr_rows_A, nr_rows_A*nr_cols_B, 1, gridSize);
std::cout << "end gemm T T" << std::endl;
hipMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
hipMemset(d_C, 0.0, nr_rows_A*nr_cols_B);
cuda_multi_gemm_unif(stream,'T', 'N', nr_rows_A, nr_cols_B, nr_cols_A, &alpha2, d_A, nr_cols_A, nr_rows_A*nr_cols_A, d_B, nr_rows_B, nr_rows_B*nr_cols_B, &beta2, d_C, nr_rows_A, nr_rows_A*nr_cols_B, 1, gridSize);
std::cout << "end gemm T N" << std::endl;
hipMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
hipMemset(d_C, 0.0, nr_rows_A*nr_cols_B);
cuda_multi_gemm_unif(stream,'N', 'T', nr_rows_A, nr_cols_B, nr_cols_A, &alpha2, d_A, nr_rows_A, nr_rows_A*nr_cols_A, d_B, nr_cols_B, nr_rows_B*nr_cols_B, &beta2, d_C, nr_rows_A, nr_rows_A*nr_cols_B, 1, gridSize);
std::cout << "end gemm N T" << std::endl;
hipMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);*/
//Free GPU memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_D);
hipFree(d_E);
// Free CPU memory
free(h_A);
free(h_B);
free(h_C);
free(h_D);
free(h_E);
return 0;
}
|
ec8b4bf9cbbef69ba26ff41f17802dd30e9b569c.cu
|
#include <cublas_v2.h>
#include <cstdlib>
#include <curand.h>
#include <iostream>
#include "cuda_multi_gemm_unif.cu"
#include <ctime> //for time measurement
//To run this code, nvcc testGemm.cu -lcublas -o testGemm
//Print matrix A(nr_rows_A, nr_cols_A) stored in column-major format
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
int m = 0;
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
std::cout << A[j * nr_rows_A + i] << " ";
//std::cout << A[m] <<" ";
m++;
}
std::cout << std::endl;
}
std::cout << std::endl;
}
int main() {
    // Allocate 5 arrays on CPU
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C, nr_rows_D, nr_cols_D, nr_rows_E, nr_cols_E;
    // dimensions: A is m x k, B is k x (n*nelt), C is m x (n*nelt), D is n x m, E is m x (m*nelt)
int m = 12, k=6, n = 6, nelt = 100;
nr_rows_A = m;
nr_cols_A = k;
nr_rows_B = k;
nr_cols_B = n*nelt;
nr_rows_C = m;
nr_cols_C = n*nelt;
nr_rows_D = n;
nr_cols_D = m;
nr_rows_E = m;
nr_cols_E = m*nelt;
float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float));
float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float));
float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float));
float *h_D = (float *)malloc(nr_rows_D * nr_cols_D * sizeof(float));
float *h_E = (float *)malloc(nr_rows_E * nr_cols_E * sizeof(float));
    // Allocate 5 arrays on GPU
float *d_A, *d_B, *d_C, *d_D, *d_E;
cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float));
cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float));
cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float));
cudaMalloc(&d_D,nr_rows_D * nr_cols_D * sizeof(float));
cudaMalloc(&d_E,nr_rows_E * nr_cols_E * sizeof(float));
// Fill the arrays A, B and D on CPU
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
h_A[i * nr_cols_A + j]= (i * nr_cols_A + j) ;
}
}
for(int i = 0; i < nr_rows_B; ++i){
for(int j = 0; j < nr_cols_B; ++j){
h_B[i * nr_cols_B + j]= (0.01*(i * nr_cols_B + j)) ;
}
}
for(int i = 0; i < nr_rows_D; ++i){
for(int j = 0; j < nr_cols_D; ++j){
h_D[i * nr_cols_D + j]= (0.001*(i * nr_cols_D + j + 10)) ;
}
}
// Optionally we can copy the data to GPU and print the arrays
cudaMemcpy(d_A,h_A,nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_D,h_D,nr_rows_D * nr_cols_D * sizeof(float),cudaMemcpyHostToDevice);
/*std::cout << "A =" << std::endl;
print_matrix(h_A, nr_rows_A, nr_cols_A);
std::cout << "B =" << std::endl;
print_matrix(h_B, nr_rows_B, nr_cols_B);
std::cout << "D =" << std::endl;
print_matrix(h_D, nr_rows_D, nr_cols_D);*/
// Multiply A and B on GPU using cublasSgemm
//measure time
clock_t begin = clock();
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
cublasHandle_t handle;
cublasCreate(&handle);
for(int i = 0; i<10000; i++){
        cudaMemset(d_C, 0, m*n*nelt*sizeof(float));
        cudaMemset(d_E, 0, m*m*nelt*sizeof(float));
// A(m,k) * B(k,n*nelt) = C(m,n*nelt)
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n*nelt, k, alpha, d_A, m, d_B, k, beta, d_C,m);
        // C(m,n)*nelt * D(n,m) = E(m,m)*nelt
cublasSgemmStridedBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, m, n, alpha, d_C, m, m*n, d_D, n, 0, beta, d_E,m, m*m, nelt);
}
cublasDestroy(handle);
clock_t end = clock();
double elapsed_sec = double(end-begin)/CLOCKS_PER_SEC;
std::cout << "cublasSgemm time " << elapsed_sec << ' ' << end <<' ' << begin << std::endl;
std::cout << "cublasSgemm result" << std::endl;
// Copy (and print) the result on host memory
cudaMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(h_E,d_E,nr_rows_E * nr_cols_E * sizeof(float),cudaMemcpyDeviceToHost);
/*std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
std::cout << "E =" << std::endl;
print_matrix(h_E, nr_rows_E, nr_cols_E);*/
//start gemm test
//measure time
clock_t begin2 = clock();
cudaStream_t stream;
cudaStreamCreate( &stream );
const float alpha2 = 1;
const float beta2 = 0;
int blockSize = 2, gridSize;
for(int i = 0; i< 10000; i++){
gridSize = (int)ceil((float)m*n*nelt/blockSize);
        cudaMemset(d_C, 0, m*n*nelt*sizeof(float));
        cudaMemset(d_E, 0, m*m*nelt*sizeof(float));
cuda_multi_gemm_unif(stream,'N', 'N', m, n, k, &alpha2, d_A, m, 0, d_B, k, k*n, &beta2, d_C, m, m*n, nelt, gridSize);
gridSize = (int)ceil((float)m*m*nelt/blockSize);
cuda_multi_gemm_unif(stream,'N', 'N', m, m, n, &alpha2, d_C, m, m*n, d_D, n, 0, &beta2, d_E, m, m*m, nelt, gridSize);
}
clock_t end2 = clock();
double elapsed_sec2 = double(end2-begin2)/CLOCKS_PER_SEC;
std::cout << "cuda_multi_gemm_unif time " << elapsed_sec2 << ' ' << end2 <<' ' << begin2 << std::endl;
std::cout << "cuda_multi_gemm_unif result" << std::endl;
cudaMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost);
/*std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
cudaMemcpy(h_E,d_E,nr_rows_E * nr_cols_E * sizeof(float),cudaMemcpyDeviceToHost);
std::cout << "E =" << std::endl;
print_matrix(h_E, nr_rows_E, nr_cols_E);*/
/* cudaMemset(d_C, 0.0, nr_rows_A*nr_cols_B);
cuda_multi_gemm_unif(stream,'T', 'T', nr_rows_A, nr_cols_B, nr_cols_A, &alpha2, d_A, nr_cols_A, nr_rows_A*nr_cols_A, d_B, nr_cols_B, nr_rows_B*nr_cols_B, &beta2, d_C, nr_rows_A, nr_rows_A*nr_cols_B, 1, gridSize);
std::cout << "end gemm T T" << std::endl;
cudaMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
cudaMemset(d_C, 0.0, nr_rows_A*nr_cols_B);
cuda_multi_gemm_unif(stream,'T', 'N', nr_rows_A, nr_cols_B, nr_cols_A, &alpha2, d_A, nr_cols_A, nr_rows_A*nr_cols_A, d_B, nr_rows_B, nr_rows_B*nr_cols_B, &beta2, d_C, nr_rows_A, nr_rows_A*nr_cols_B, 1, gridSize);
std::cout << "end gemm T N" << std::endl;
cudaMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
cudaMemset(d_C, 0.0, nr_rows_A*nr_cols_B);
cuda_multi_gemm_unif(stream,'N', 'T', nr_rows_A, nr_cols_B, nr_cols_A, &alpha2, d_A, nr_rows_A, nr_rows_A*nr_cols_A, d_B, nr_cols_B, nr_rows_B*nr_cols_B, &beta2, d_C, nr_rows_A, nr_rows_A*nr_cols_B, 1, gridSize);
std::cout << "end gemm N T" << std::endl;
cudaMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);*/
//Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_D);
cudaFree(d_E);
// Free CPU memory
free(h_A);
free(h_B);
free(h_C);
free(h_D);
free(h_E);
return 0;
}
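// Editor's note: a hedged sketch, not part of the original file. clock() above measures
// host CPU time while the cuBLAS calls are asynchronous, so without an explicit
// synchronization the measured interval need not match the GPU execution time. CUDA
// events give a GPU-side measurement; this helper assumes <cublas_v2.h> (included above)
// and the CUDA runtime headers that nvcc provides.
float timeSgemmMs(cublasHandle_t handle, int m, int n, int k,
                  const float* d_A, const float* d_B, float* d_C, int iters)
{
    const float alf = 1.0f, bet = 0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                     // enqueued on the default stream
    for (int i = 0; i < iters; ++i)
        cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                    &alf, d_A, m, d_B, k, &bet, d_C, m);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                 // block until all enqueued work is done
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);     // elapsed GPU time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}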
|
3c5cde22e45f0dd9410a6f107b6fff65778ea28e.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/triangular_solve_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memory.h"
namespace phi {
template <typename T, typename Context>
void TriangularSolveKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
bool upper,
bool transpose,
bool unitriangular,
DenseTensor* out) {
// get broadcast dim
std::vector<int64_t> x_bst_dims_vec;
std::vector<int64_t> y_bst_dims_vec;
std::tie(x_bst_dims_vec, y_bst_dims_vec) =
funcs::MatrixGetBroadcastDims(x, y);
int x_bst_ndim = x_bst_dims_vec.size();
int y_bst_ndim = y_bst_dims_vec.size();
// Tensor broadcast to 'out' and temp 'x_bst'
IntArray x_bst_dims(x_bst_dims_vec);
DenseTensor x_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims);
const T* x_bst_data = x_bst.data<T>();
ExpandKernel<T, Context>(dev_ctx, x, x_bst_dims, &x_bst);
out->Resize(phi::make_ddim(y_bst_dims_vec));
T* out_data = dev_ctx.template Alloc<T>(out);
IntArray y_bst_dims(y_bst_dims_vec);
ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, out);
// calculate use cublas library
CBLAS_UPLO uplo = upper ? CblasUpper : CblasLower;
CBLAS_TRANSPOSE transA = transpose ? CblasTrans : CblasNoTrans;
CBLAS_DIAG diag = unitriangular ? CblasUnit : CblasNonUnit;
int M = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 2]);
int N = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 1]);
auto lda = ::max(1, M);
auto ldb = ::max(1, N);
int batch_size = 1;
for (int i = 0; i < x_bst_ndim - 2; i++) {
batch_size *= x_bst_dims_vec[i];
}
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
if (batch_size <= 8 && M >= 64) {
for (auto i = 0; i < batch_size; i++) {
blas.TRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
T(1),
x_bst_data + i * M * M,
lda,
out_data + i * N * M,
ldb);
}
} else {
std::vector<const T*> cpu_ptrs(batch_size * 2);
for (int i = 0; i < batch_size; ++i) {
cpu_ptrs[i] = x_bst_data + i * M * M;
cpu_ptrs[i + batch_size] = out_data + i * M * N;
}
// Copy the addresses of A and tmp_b from host to device.
paddle::memory::allocation::AllocationPtr tmp_gpu_ptrs_data =
paddle::memory::Alloc(dev_ctx, cpu_ptrs.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_gpu_ptrs_data->ptr(),
paddle::platform::CPUPlace(),
static_cast<void*>(cpu_ptrs.data()),
cpu_ptrs.size() * sizeof(T*),
dev_ctx.stream());
const T** gpu_a_ptrs =
reinterpret_cast<const T**>(tmp_gpu_ptrs_data->ptr());
T** gpu_b_ptrs =
reinterpret_cast<T**>(tmp_gpu_ptrs_data->ptr()) + batch_size;
blas.BatchedTRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
static_cast<T>(1.0),
gpu_a_ptrs,
lda,
gpu_b_ptrs,
ldb,
batch_size);
}
}
} // namespace phi
PD_REGISTER_KERNEL(triangular_solve,
GPU,
ALL_LAYOUT,
phi::TriangularSolveKernel,
float,
double) {}
|
3c5cde22e45f0dd9410a6f107b6fff65778ea28e.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/triangular_solve_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memory.h"
namespace phi {
template <typename T, typename Context>
void TriangularSolveKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
bool upper,
bool transpose,
bool unitriangular,
DenseTensor* out) {
// get broadcast dim
std::vector<int64_t> x_bst_dims_vec;
std::vector<int64_t> y_bst_dims_vec;
std::tie(x_bst_dims_vec, y_bst_dims_vec) =
funcs::MatrixGetBroadcastDims(x, y);
int x_bst_ndim = x_bst_dims_vec.size();
int y_bst_ndim = y_bst_dims_vec.size();
// Tensor broadcast to 'out' and temp 'x_bst'
IntArray x_bst_dims(x_bst_dims_vec);
DenseTensor x_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims);
const T* x_bst_data = x_bst.data<T>();
ExpandKernel<T, Context>(dev_ctx, x, x_bst_dims, &x_bst);
out->Resize(phi::make_ddim(y_bst_dims_vec));
T* out_data = dev_ctx.template Alloc<T>(out);
IntArray y_bst_dims(y_bst_dims_vec);
ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, out);
// calculate use cublas library
CBLAS_UPLO uplo = upper ? CblasUpper : CblasLower;
CBLAS_TRANSPOSE transA = transpose ? CblasTrans : CblasNoTrans;
CBLAS_DIAG diag = unitriangular ? CblasUnit : CblasNonUnit;
int M = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 2]);
int N = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 1]);
auto lda = std::max(1, M);
auto ldb = std::max(1, N);
int batch_size = 1;
for (int i = 0; i < x_bst_ndim - 2; i++) {
batch_size *= x_bst_dims_vec[i];
}
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
if (batch_size <= 8 && M >= 64) {
for (auto i = 0; i < batch_size; i++) {
blas.TRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
T(1),
x_bst_data + i * M * M,
lda,
out_data + i * N * M,
ldb);
}
} else {
std::vector<const T*> cpu_ptrs(batch_size * 2);
for (int i = 0; i < batch_size; ++i) {
cpu_ptrs[i] = x_bst_data + i * M * M;
cpu_ptrs[i + batch_size] = out_data + i * M * N;
}
// Copy the addresses of A and tmp_b from host to device.
paddle::memory::allocation::AllocationPtr tmp_gpu_ptrs_data =
paddle::memory::Alloc(dev_ctx, cpu_ptrs.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_gpu_ptrs_data->ptr(),
paddle::platform::CPUPlace(),
static_cast<void*>(cpu_ptrs.data()),
cpu_ptrs.size() * sizeof(T*),
dev_ctx.stream());
const T** gpu_a_ptrs =
reinterpret_cast<const T**>(tmp_gpu_ptrs_data->ptr());
T** gpu_b_ptrs =
reinterpret_cast<T**>(tmp_gpu_ptrs_data->ptr()) + batch_size;
blas.BatchedTRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
static_cast<T>(1.0),
gpu_a_ptrs,
lda,
gpu_b_ptrs,
ldb,
batch_size);
}
}
} // namespace phi
PD_REGISTER_KERNEL(triangular_solve,
GPU,
ALL_LAYOUT,
phi::TriangularSolveKernel,
float,
double) {}
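// Editor's note: a hedged, self-contained sketch, not part of the original Paddle file.
// It shows the batched-TRSM pattern used above in raw cuBLAS terms: build an array of
// per-matrix device pointers on the host, copy it to the device, then issue one batched
// call. Column-major layout is assumed here; the Blas wrapper above additionally handles
// the row-/column-major translation, so this is an illustration of the pattern, not a
// drop-in equivalent. Requires <cuda_runtime.h>, <cublas_v2.h> and <vector>.
void batchedLowerSolve(cublasHandle_t handle, float* d_A, float* d_B,
                       int M, int N, int batch)
{
    std::vector<float*> hA(batch), hB(batch);
    for (int i = 0; i < batch; ++i) {
        hA[i] = d_A + i * M * M;   // i-th M x M lower-triangular factor
        hB[i] = d_B + i * M * N;   // i-th M x N right-hand-side block
    }
    float **dA = nullptr, **dB = nullptr;
    cudaMalloc((void**)&dA, batch * sizeof(float*));
    cudaMalloc((void**)&dB, batch * sizeof(float*));
    cudaMemcpy(dA, hA.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
    const float alpha = 1.0f;
    cublasStrsmBatched(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,
                       CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha,
                       dA, M, dB, M, batch);    // solves A_i * X_i = B_i, X_i overwrites B_i
    cudaFree(dA);
    cudaFree(dB);
}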
|
5e8c5e0313452ca961c5aef1530864a634f78b72.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvTransposeOpKernel.h"
#include "Open3D/Core/CUDAUtils.h"
#include "Open3D/ML/ContinuousConv/Detail/ContinuousConvTranspose.cuh"
using namespace open3d;
using namespace open3d::ml::detail;
using namespace tensorflow;
template <class TReal, class TIndex>
class ContinuousConvTransposeOpKernelCUDA
: public ContinuousConvTransposeOpKernel<TIndex> {
public:
explicit ContinuousConvTransposeOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvTransposeOpKernel<TIndex>(construction) {
texture_alignment = GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& out_importance,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_neighbors_importance_sum,
const tensorflow::Tensor& inp_neighbors_row_splits,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& out_features) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
CConvTransposeComputeFeaturesCUDA<TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, out_features.flat<TReal>().data(),
filter_dims, filter.flat<TReal>().data(),
out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TReal>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TReal>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TReal>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TReal>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
this->interpolation, this->coordinate_mapping,
this->align_corners, individual_extents, isotropic_extents,
this->normalize);
temp_size =
::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvTransposeComputeFeaturesCUDA<TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, out_features.flat<TReal>().data(),
filter_dims, filter.flat<TReal>().data(),
out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TReal>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TReal>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TReal>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TReal>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
this->interpolation, this->coordinate_mapping,
this->align_corners, individual_extents, isotropic_extents,
this->normalize);
}
private:
int texture_alignment;
};
#define REG_KB(type, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvTranspose") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvTransposeOpKernelCUDA<type, indextype>);
REG_KB(float, int32)
#undef REG_KB
|
5e8c5e0313452ca961c5aef1530864a634f78b72.cu
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvTransposeOpKernel.h"
#include "Open3D/Core/CUDAUtils.h"
#include "Open3D/ML/ContinuousConv/Detail/ContinuousConvTranspose.cuh"
using namespace open3d;
using namespace open3d::ml::detail;
using namespace tensorflow;
template <class TReal, class TIndex>
class ContinuousConvTransposeOpKernelCUDA
: public ContinuousConvTransposeOpKernel<TIndex> {
public:
explicit ContinuousConvTransposeOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvTransposeOpKernel<TIndex>(construction) {
texture_alignment = GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& out_importance,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_neighbors_importance_sum,
const tensorflow::Tensor& inp_neighbors_row_splits,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& out_features) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
CConvTransposeComputeFeaturesCUDA<TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, out_features.flat<TReal>().data(),
filter_dims, filter.flat<TReal>().data(),
out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TReal>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TReal>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TReal>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TReal>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
this->interpolation, this->coordinate_mapping,
this->align_corners, individual_extents, isotropic_extents,
this->normalize);
temp_size =
std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvTransposeComputeFeaturesCUDA<TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, out_features.flat<TReal>().data(),
filter_dims, filter.flat<TReal>().data(),
out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TReal>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TReal>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TReal>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TReal>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
this->interpolation, this->coordinate_mapping,
this->align_corners, individual_extents, isotropic_extents,
this->normalize);
}
private:
int texture_alignment;
};
#define REG_KB(type, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvTranspose") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvTransposeOpKernelCUDA<type, indextype>);
REG_KB(float, int32)
#undef REG_KB
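// Editor's note: a hedged, self-contained sketch, not part of the original Open3D file.
// The kernel above uses a two-pass temp-storage protocol (call once with a null pointer
// to query the workspace size, allocate, then call again to do the work). CUB's
// device-wide algorithms follow the same convention, illustrated here with a reduction.
// Requires <cub/cub.cuh> and <cuda_runtime.h>.
void reduceSum(const float* d_in, float* d_out, int n)
{
    void* d_temp = nullptr;
    size_t temp_bytes = 0;
    // Pass 1: d_temp == nullptr, so only the required size is written to temp_bytes.
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);
    cudaMalloc(&d_temp, temp_bytes);
    // Pass 2: same arguments with real workspace; performs the reduction into d_out.
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);
    cudaFree(d_temp);
}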
|
eb8e3230176fc06676875b3a0976d4e8782a3183.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#define SIZE 1024
__global__ void VectorAdd(int *a, int *b, int*c,int n) {
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
hipMalloc(&d_a, SIZE*sizeof(int));
hipMalloc(&d_b, SIZE*sizeof(int));
hipMalloc(&d_c, SIZE*sizeof(int));
for (int i = 0; i < SIZE; ++i) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
hipMemcpy(d_a, a, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( VectorAdd), dim3(1), dim3(SIZE), 0, 0, d_a, d_b, d_c, SIZE);
hipMemcpy(c, d_c, SIZE*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < 10; ++i)
printf("c[%d] = %d\n", i, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
|
eb8e3230176fc06676875b3a0976d4e8782a3183.cu
|
#include<stdio.h>
#define SIZE 1024
__global__ void VectorAdd(int *a, int *b, int*c,int n) {
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
cudaMalloc(&d_a, SIZE*sizeof(int));
cudaMalloc(&d_b, SIZE*sizeof(int));
cudaMalloc(&d_c, SIZE*sizeof(int));
for (int i = 0; i < SIZE; ++i) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice);
VectorAdd<<<1, SIZE>>>(d_a, d_b, d_c, SIZE);
cudaMemcpy(c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < 10; ++i)
printf("c[%d] = %d\n", i, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
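// Editor's note: a hedged sketch, not part of the original file. The single-block launch
// above only covers SIZE <= 1024 elements (the per-block thread limit on current GPUs).
// A grid-stride loop decouples the problem size from the launch configuration:
__global__ void VectorAddStrided(const int* a, const int* b, int* c, int n)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        c[i] = a[i] + b[i];
}
// Example launch covering any n:
//   int threads = 256;
//   int blocks  = (n + threads - 1) / threads;
//   VectorAddStrided<<<blocks, threads>>>(d_a, d_b, d_c, n);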
|
a1c791d3330e4a985d1841e1407ed6d6a58ac0bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 %s -triple ptx32-unknown-unknown -fcuda-is-device -emit-llvm -o - | FileCheck %s
#include "../SemaCUDA/cuda.h"
// CHECK: define ptx_device{{.*}}device_function
__device__ void device_function() {}
// CHECK: define ptx_kernel{{.*}}global_function
__global__ void global_function() {
// CHECK: call ptx_device{{.*}}device_function
device_function();
}
|
a1c791d3330e4a985d1841e1407ed6d6a58ac0bd.cu
|
// RUN: %clang_cc1 %s -triple ptx32-unknown-unknown -fcuda-is-device -emit-llvm -o - | FileCheck %s
#include "../SemaCUDA/cuda.h"
// CHECK: define ptx_device{{.*}}device_function
__device__ void device_function() {}
// CHECK: define ptx_kernel{{.*}}global_function
__global__ void global_function() {
// CHECK: call ptx_device{{.*}}device_function
device_function();
}
|
da851ef539605b99a29fa70ee14f05245b469585.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> s d c
@author Adrien Remy
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "zgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
ZPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mtv_batched(
magma_int_t n,
magmaDoubleComplex *du, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount);
hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n, db_array, 0);
hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n+n/2, db_array, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, du, 0, db_array, 0);
}
/***************************************************************************//**
Purpose
-------
ZPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mv_batched(
magma_int_t n,
magmaDoubleComplex *dv, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount);
hipLaunchKernelGGL(( magmablas_zapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dv, 0, db_array, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
hipLaunchKernelGGL(( magmablas_zapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n, db_array, 0);
hipLaunchKernelGGL(( magmablas_zapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n+n/2, db_array, n/2);
}
/***************************************************************************//**
Purpose
-------
ZPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_batched(
magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t ldda,
magmaDoubleComplex *du, magmaDoubleComplex *dv,
magma_int_t batchCount, magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid( magma_ceildiv( n, 4*block_height ),
magma_ceildiv( n, 4*block_width ),
batchCount );
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, 0, ldda, du, 0, dv, 0);
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2);
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, n/2, ldda, du, n/2, dv, 0);
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2( magma_ceildiv( n, 2*block_height ),
magma_ceildiv( n, 2*block_width ),
batchCount );
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue->cuda_stream() , n, dA_array, 0, ldda, du, -ldda, dv, -ldda);
}
|
da851ef539605b99a29fa70ee14f05245b469585.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> s d c
@author Adrien Remy
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "zgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
ZPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mtv_batched(
magma_int_t n,
magmaDoubleComplex *du, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount);
magmablas_zapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n, db_array, 0);
magmablas_zapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n+n/2, db_array, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
magmablas_zapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, du, 0, db_array, 0);
}
/***************************************************************************//**
Purpose
-------
ZPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mv_batched(
magma_int_t n,
magmaDoubleComplex *dv, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount);
magmablas_zapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, dv, 0, db_array, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
magmablas_zapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n, db_array, 0);
magmablas_zapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n+n/2, db_array, n/2);
}
/***************************************************************************//**
Purpose
-------
ZPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_batched(
magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t ldda,
magmaDoubleComplex *du, magmaDoubleComplex *dv,
magma_int_t batchCount, magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid( magma_ceildiv( n, 4*block_height ),
magma_ceildiv( n, 4*block_width ),
batchCount );
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, 0, ldda, du, 0, dv, 0);
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2);
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, n/2, ldda, du, n/2, dv, 0);
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2( magma_ceildiv( n, 2*block_height ),
magma_ceildiv( n, 2*block_width ),
batchCount );
magmablas_zelementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue->cuda_stream() >>>(n, dA_array, 0, ldda, du, -ldda, dv, -ldda);
}
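// Editor's note, not part of the MAGMA sources: for reference, the elementary random
// butterfly used by this kind of randomization is usually written (Parker; Baboulin,
// Dongarra, Tomov et al.) as an n x n matrix built from two random diagonal blocks R0, R1:
//
//     B = (1/sqrt(2)) * [ R0   R1 ]
//                       [ R0  -R1 ]
//
// Applying B (or its transpose) to a vector therefore splits it into halves, scales each
// half by a diagonal, and forms sums and differences, which is why the apply_vector
// kernels launched above run in O(n) work per level. This is the editor's summary of the
// standard construction, offered as context only.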
|
896c1cda17727bb87326648ce707243be14fb06f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void warpTest()
{
printf("BlockId: %d, ThreadId: %d\n", blockIdx.x, threadIdx.x);
}
int example2()
{
warpTest << <5, 32 >> > ();
    // wait for any key press
getchar();
return 0;
}
|
896c1cda17727bb87326648ce707243be14fb06f.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void warpTest()
{
printf("BlockId: %d, ThreadId: %d\n", blockIdx.x, threadIdx.x);
}
int example2()
{
warpTest << <5, 32 >> > ();
    // wait for any key press
getchar();
return 0;
}
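// Editor's note: a hedged sketch, not part of the original file. With <<<5, 32>>> each
// block is exactly one warp; printing warp and lane indices makes that mapping explicit
// (printf needs <stdio.h>, which the file above already includes):
__global__ void warpTestDetailed()
{
    int warpInBlock = threadIdx.x / warpSize;   // warpSize is 32 on current NVIDIA GPUs
    int lane        = threadIdx.x % warpSize;
    printf("Block %d, warp %d, lane %d\n", blockIdx.x, warpInBlock, lane);
}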
|