hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M)
---|---|---|---
e214632d3097cd701c043a3290c7325c900d7160.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaUtil.h"
#include "../../../../usr/local/cuda/include/host_defines.h"
#include <cmath>
__global__
void interpolateKernel(
float* sxyzs, // [spn,3]
float* sprobs, // [spn,cn]
float* qxyzs, // [qpn,3]
float* qprobs, // [qpn,cn]
int* nidxs,
int* nidxs_lens,// [qpn]
int* nidxs_bgs, // [qpn]
int spn,
int qpn,
int cn,
float ratio
)
{
int qpi = threadIdx.x + blockIdx.x*blockDim.x;
if(qpi>=qpn) return;
// compute distance
float sum_exp_neg=0.0;
float qx=qxyzs[qpi*3+0];
float qy=qxyzs[qpi*3+1];
float qz=qxyzs[qpi*3+2];
int nbg=nidxs_bgs[qpi];
int nn=nidxs_lens[qpi];
int ned=nn+nbg;
for(int ni=nbg;ni<ned;ni++)
{
float x=sxyzs[nidxs[ni]*3+0];
float y=sxyzs[nidxs[ni]*3+1];
float z=sxyzs[nidxs[ni]*3+2];
float dist=sqrt((qx-x)*(qx-x)+(qy-y)*(qy-y)+(qz-z)*(qz-z));
sum_exp_neg+=exp(-dist*ratio);
}
float* qprobs_p=&qprobs[qpi*cn];
for(int ci=0;ci<cn;ci++)
qprobs_p[ci]=0.f;
for(int ni=nbg;ni<ned;ni++)
{
float x=sxyzs[nidxs[ni]*3+0];
float y=sxyzs[nidxs[ni]*3+1];
float z=sxyzs[nidxs[ni]*3+2];
float dist=sqrt((qx-x)*(qx-x)+(qy-y)*(qy-y)+(qz-z)*(qz-z));
float w=exp(-dist*ratio)/sum_exp_neg;
for(int ci=0;ci<cn;ci++)
qprobs_p[ci]+=w*sprobs[nidxs[ni]*cn+ci];
}
}
void interpolateImpl(
float* h_sxyzs, // [spn,3]
float* h_sprobs, // [spn,cn]
float* h_qxyzs, // [qpn,3]
float* h_qprobs, // [qpn,cn]
int* h_nidxs,
int* h_nidxs_lens,// [qpn]
int* h_nidxs_bgs, // [qpn]
int spn,
int qpn,
int cn,
int nn,
float ratio,
int gpu_id
)
{
gpuErrchk(hipSetDevice(gpu_id))
int block_num=qpn/1024;
if(qpn%1024>0) block_num++;
dim3 block_dim(block_num);
dim3 thread_dim(1024);
float* d_syxzs,*d_sprobs,*d_qxyzs,*d_qprobs;
gpuErrchk(hipMalloc((void**)&d_syxzs, spn * 3 * sizeof(float)))
gpuErrchk(hipMalloc((void**)&d_sprobs, spn * cn * sizeof(float)))
gpuErrchk(hipMalloc((void**)&d_qxyzs, qpn * 3 * sizeof(float)))
gpuErrchk(hipMalloc((void**)&d_qprobs, qpn * cn * sizeof(float)))
gpuErrchk(hipMemcpy(d_syxzs, h_sxyzs, spn * 3 * sizeof(float), hipMemcpyHostToDevice))
gpuErrchk(hipMemcpy(d_sprobs, h_sprobs, spn * cn * sizeof(float), hipMemcpyHostToDevice))
gpuErrchk(hipMemcpy(d_qxyzs, h_qxyzs, qpn * 3 * sizeof(float), hipMemcpyHostToDevice))
int* d_nidxs,*d_nidxs_lens,*d_nidxs_bgs;
gpuErrchk(hipMalloc((void**)&d_nidxs, nn * sizeof(int)))
gpuErrchk(hipMalloc((void**)&d_nidxs_lens, qpn * sizeof(int)))
gpuErrchk(hipMalloc((void**)&d_nidxs_bgs, qpn * sizeof(int)))
gpuErrchk(hipMemcpy(d_nidxs, h_nidxs, nn * sizeof(int), hipMemcpyHostToDevice))
gpuErrchk(hipMemcpy(d_nidxs_lens, h_nidxs_lens, qpn * sizeof(int), hipMemcpyHostToDevice))
gpuErrchk(hipMemcpy(d_nidxs_bgs, h_nidxs_bgs, qpn * sizeof(int), hipMemcpyHostToDevice))
hipLaunchKernelGGL(( interpolateKernel), dim3(block_dim),dim3(thread_dim), 0, 0,
d_syxzs,d_sprobs,d_qxyzs,d_qprobs,d_nidxs,d_nidxs_lens,d_nidxs_bgs,spn,qpn,cn,ratio);
gpuErrchk(hipMemcpy(h_qprobs, d_qprobs, qpn * cn * sizeof(float), hipMemcpyDeviceToHost))
hipFree(d_syxzs);
hipFree(d_sprobs);
hipFree(d_qxyzs);
hipFree(d_qprobs);
hipFree(d_nidxs);
hipFree(d_nidxs_lens);
hipFree(d_nidxs_bgs);
}
|
e214632d3097cd701c043a3290c7325c900d7160.cu
|
#include "CudaUtil.h"
#include "../../../../usr/local/cuda/include/host_defines.h"
#include <cmath>
__global__
void interpolateKernel(
float* sxyzs, // [spn,3]
float* sprobs, // [spn,cn]
float* qxyzs, // [qpn,3]
float* qprobs, // [qpn,cn]
int* nidxs,
int* nidxs_lens,// [qpn]
int* nidxs_bgs, // [qpn]
int spn,
int qpn,
int cn,
float ratio
)
{
int qpi = threadIdx.x + blockIdx.x*blockDim.x;
if(qpi>=qpn) return;
// compute distance
float sum_exp_neg=0.0;
float qx=qxyzs[qpi*3+0];
float qy=qxyzs[qpi*3+1];
float qz=qxyzs[qpi*3+2];
int nbg=nidxs_bgs[qpi];
int nn=nidxs_lens[qpi];
int ned=nn+nbg;
for(int ni=nbg;ni<ned;ni++)
{
float x=sxyzs[nidxs[ni]*3+0];
float y=sxyzs[nidxs[ni]*3+1];
float z=sxyzs[nidxs[ni]*3+2];
float dist=sqrt((qx-x)*(qx-x)+(qy-y)*(qy-y)+(qz-z)*(qz-z));
sum_exp_neg+=exp(-dist*ratio);
}
float* qprobs_p=&qprobs[qpi*cn];
for(int ci=0;ci<cn;ci++)
qprobs_p[ci]=0.f;
for(int ni=nbg;ni<ned;ni++)
{
float x=sxyzs[nidxs[ni]*3+0];
float y=sxyzs[nidxs[ni]*3+1];
float z=sxyzs[nidxs[ni]*3+2];
float dist=sqrt((qx-x)*(qx-x)+(qy-y)*(qy-y)+(qz-z)*(qz-z));
float w=exp(-dist*ratio)/sum_exp_neg;
for(int ci=0;ci<cn;ci++)
qprobs_p[ci]+=w*sprobs[nidxs[ni]*cn+ci];
}
}
void interpolateImpl(
float* h_sxyzs, // [spn,3]
float* h_sprobs, // [spn,cn]
float* h_qxyzs, // [qpn,3]
float* h_qprobs, // [qpn,cn]
int* h_nidxs,
int* h_nidxs_lens,// [qpn]
int* h_nidxs_bgs, // [qpn]
int spn,
int qpn,
int cn,
int nn,
float ratio,
int gpu_id
)
{
gpuErrchk(cudaSetDevice(gpu_id))
int block_num=qpn/1024;
if(qpn%1024>0) block_num++;
dim3 block_dim(block_num);
dim3 thread_dim(1024);
float* d_syxzs,*d_sprobs,*d_qxyzs,*d_qprobs;
gpuErrchk(cudaMalloc((void**)&d_syxzs, spn * 3 * sizeof(float)))
gpuErrchk(cudaMalloc((void**)&d_sprobs, spn * cn * sizeof(float)))
gpuErrchk(cudaMalloc((void**)&d_qxyzs, qpn * 3 * sizeof(float)))
gpuErrchk(cudaMalloc((void**)&d_qprobs, qpn * cn * sizeof(float)))
gpuErrchk(cudaMemcpy(d_syxzs, h_sxyzs, spn * 3 * sizeof(float), cudaMemcpyHostToDevice))
gpuErrchk(cudaMemcpy(d_sprobs, h_sprobs, spn * cn * sizeof(float), cudaMemcpyHostToDevice))
gpuErrchk(cudaMemcpy(d_qxyzs, h_qxyzs, qpn * 3 * sizeof(float), cudaMemcpyHostToDevice))
int* d_nidxs,*d_nidxs_lens,*d_nidxs_bgs;
gpuErrchk(cudaMalloc((void**)&d_nidxs, nn * sizeof(int)))
gpuErrchk(cudaMalloc((void**)&d_nidxs_lens, qpn * sizeof(int)))
gpuErrchk(cudaMalloc((void**)&d_nidxs_bgs, qpn * sizeof(int)))
gpuErrchk(cudaMemcpy(d_nidxs, h_nidxs, nn * sizeof(int), cudaMemcpyHostToDevice))
gpuErrchk(cudaMemcpy(d_nidxs_lens, h_nidxs_lens, qpn * sizeof(int), cudaMemcpyHostToDevice))
gpuErrchk(cudaMemcpy(d_nidxs_bgs, h_nidxs_bgs, qpn * sizeof(int), cudaMemcpyHostToDevice))
interpolateKernel<<<block_dim,thread_dim>>>
(d_syxzs,d_sprobs,d_qxyzs,d_qprobs,d_nidxs,d_nidxs_lens,d_nidxs_bgs,spn,qpn,cn,ratio);
gpuErrchk(cudaMemcpy(h_qprobs, d_qprobs, qpn * cn * sizeof(float), cudaMemcpyDeviceToHost))
cudaFree(d_syxzs);
cudaFree(d_sprobs);
cudaFree(d_qxyzs);
cudaFree(d_qprobs);
cudaFree(d_nidxs);
cudaFree(d_nidxs_lens);
cudaFree(d_nidxs_bgs);
}
|
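Both files in the pair above wrap every runtime call in a gpuErrchk macro that comes from "CudaUtil.h", a header that is not included in this row. The following is a minimal sketch of what such a wrapper typically looks like, written against the HIP runtime and assumed purely for illustration; it is not the dataset's actual header.

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

// Illustrative only: an assumed, typical definition of the gpuErrchk idiom.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line)
{
  if (code != hipSuccess) {
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    exit(static_cast<int>(code));
  }
}

// hipify also rewrites the kernel launch visible in this pair:
//   CUDA: interpolateKernel<<<block_dim, thread_dim>>>(d_syxzs, ...);
//   HIP:  hipLaunchKernelGGL(interpolateKernel, dim3(block_dim), dim3(thread_dim), 0, 0, d_syxzs, ...);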
2bb2dc26bfa3234557b275074aac2eb85b6d9a70.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const bool real_pool_size, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if (real_pool_size) {
pool_size = (hend - hstart) * (wend - wstart);
}
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void GlobalAvePoolForward(const int spatial_dim,
const Dtype* bottom_data, Dtype* top_data) {
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
unsigned int tid = threadIdx.x;
buffer[tid] = 0;
__syncthreads();
for (int j = tid; j < spatial_dim; j += blockDim.x) {
buffer[tid] += bottom_data[blockIdx.x * spatial_dim + j];
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i) {
buffer[threadIdx.x] += buffer[threadIdx.x + i];
}
__syncthreads();
}
if (tid == 0) {
top_data[blockIdx.x] = buffer[0] / spatial_dim;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
if (this->layer_param_.pooling_param().global_pooling()) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( GlobalAvePoolForward<Dtype>), dim3(bottom[0]->count(0, 2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(2), bottom_data, top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, real_pool_size_, top_data);
}
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const bool real_pool_size, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
if (real_pool_size) {
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
pool_size = (hend - hstart) * (wend - wstart);
}
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void GlobalAvePoolBackward(const int nthreads, const int spatial_dim,
const Dtype* top_diff, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
bottom_diff[index] = top_diff[n] / spatial_dim;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
if (this->layer_param_.pooling_param().global_pooling()) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( GlobalAvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->count(2),
top_diff, bottom_diff);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, real_pool_size_, bottom_diff);
}
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
2bb2dc26bfa3234557b275074aac2eb85b6d9a70.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const bool real_pool_size, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if (real_pool_size) {
pool_size = (hend - hstart) * (wend - wstart);
}
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void GlobalAvePoolForward(const int spatial_dim,
const Dtype* bottom_data, Dtype* top_data) {
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
unsigned int tid = threadIdx.x;
buffer[tid] = 0;
__syncthreads();
for (int j = tid; j < spatial_dim; j += blockDim.x) {
buffer[tid] += bottom_data[blockIdx.x * spatial_dim + j];
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i) {
buffer[threadIdx.x] += buffer[threadIdx.x + i];
}
__syncthreads();
}
if (tid == 0) {
top_data[blockIdx.x] = buffer[0] / spatial_dim;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
if (this->layer_param_.pooling_param().global_pooling()) {
// NOLINT_NEXT_LINE(whitespace/operators)
GlobalAvePoolForward<Dtype><<<bottom[0]->count(0, 2), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(2), bottom_data, top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, real_pool_size_, top_data);
}
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const bool real_pool_size, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
if (real_pool_size) {
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
pool_size = (hend - hstart) * (wend - wstart);
}
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void GlobalAvePoolBackward(const int nthreads, const int spatial_dim,
const Dtype* top_diff, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
bottom_diff[index] = top_diff[n] / spatial_dim;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
if (this->layer_param_.pooling_param().global_pooling()) {
// NOLINT_NEXT_LINE(whitespace/operators)
GlobalAvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[0]->count(2),
top_diff, bottom_diff);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, real_pool_size_, bottom_diff);
}
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
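The pooling kernels in the pair above iterate with CUDA_KERNEL_LOOP and are launched over CAFFE_GET_BLOCKS(count) blocks of CAFFE_CUDA_NUM_THREADS threads; all three helpers live in Caffe's own headers rather than in this row. Below is a sketch of the grid-stride loop idiom they implement, with the thread count and exact form assumed for illustration (Caffe's real definitions, in caffe/util/device_alternate.hpp, may differ in detail).

#include <hip/hip_runtime.h>

// Assumed, illustrative versions of Caffe's launch helpers.
constexpr int CAFFE_CUDA_NUM_THREADS = 512;

inline int CAFFE_GET_BLOCKS(const int n) {
  return (n + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

// Grid-stride loop: each thread handles indices i, i + gridDim.x*blockDim.x, ...
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// Hypothetical kernel showing how the macro is used, mirroring the pattern above.
__global__ void scaleKernel(const int nthreads, float* data, const float alpha) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    data[index] *= alpha;
  }
}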
58204488b123ac3aa6c87a61e855edcc52a69feb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n",
(int)error_id, hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0) {
printf("There are no available device(s) that support CUDA\n");
} else {
printf("Detected %d cuda Capable device(s)\n", deviceCount);
}
int dev = 0, driverVersion = 0, runtimeVersion = 0;
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n",
driverVersion/1000, (driverVersion%100)/10,
runtimeVersion/1000, (runtimeVersion%100)/10);
printf(" CUDA Capability MajorMinor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
printf(" Total amont of global memory: %.2f MBytes"
"(%llu bytes)\n",
(float)deviceProp.totalGlobalMem/(pow(1024.0, 3)),
(unsigned long long) deviceProp.totalGlobalMem);
printf(" GPU Clock rate: %.0f MHz "
"(%0.2f GHz)\n", deviceProp.clockRate * 1e-3f,
deviceProp.clockRate * 1e-6f);
printf(" Memory Clock rate: %.0f MHz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize)
{
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
"2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
deviceProp.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
"2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
exit(EXIT_SUCCESS);
}
|
58204488b123ac3aa6c87a61e855edcc52a69feb.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char **argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n",
(int)error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0) {
printf("There are no available device(s) that support CUDA\n");
} else {
printf("Detected %d cuda Capable device(s)\n", deviceCount);
}
int dev = 0, driverVersion = 0, runtimeVersion = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n",
driverVersion/1000, (driverVersion%100)/10,
runtimeVersion/1000, (runtimeVersion%100)/10);
printf(" CUDA Capability MajorMinor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
printf(" Total amont of global memory: %.2f MBytes"
"(%llu bytes)\n",
(float)deviceProp.totalGlobalMem/(pow(1024.0, 3)),
(unsigned long long) deviceProp.totalGlobalMem);
printf(" GPU Clock rate: %.0f MHz "
"(%0.2f GHz)\n", deviceProp.clockRate * 1e-3f,
deviceProp.clockRate * 1e-6f);
printf(" Memory Clock rate: %.0f MHz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize)
{
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
"2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
deviceProp.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
"2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
exit(EXIT_SUCCESS);
}
|
9dc93d9cb1de9aa23c531d6441cb2239095e524a.hip
|
// !!! This is a file automatically generated by hipify!!!
/* -*- mode: C++; c-file-style: "bsd"; c-basic-offset: 2; indent-tabs-mode: nil -*- */
#include "linearSolvers.hpp"
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include "Complex.hpp"
#include "Matrix.hpp"
#include "Accelerator/DeviceStorage.hpp"
#include "Accelerator/deviceCheckError.hpp"
/*
#define IDX(i, j, lDim) (((j)*(lDim))+(i))
template <typename T>
void zeroMatrixCuda(T *devM, int lDim, int nCol)
{
// for(int i=0; i<m.n_row(); i++)
// for(int j=0; j<m.n_col(); j++)
// m(i,j) = 0.0;
hipMemset(devM, 0, lDim*nCol*sizeof(T));
}
template <typename T>
__global__ void setDiagonalKernelCuda(T *devM, int lDim, int nCol, T val)
{
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<nCol)
{
devM[IDX(i, i, lDim)] = val;
}
}
template <typename T>
__global__ void addDiagonalKernelCuda(T *devM, int lDim, int nCol, T val)
{
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<nCol)
{
devM[IDX(i, i, lDim)] = cuCadd(devM[IDX(i, i, lDim)], val);
}
}
template <typename T>
void unitMatrixCuda(T *devM, int lDim, int nCol)
{
zeroMatrixCuda(devM, lDim, nCol);
setDiagonalKernelCuda<<<nCol,1>>>(devM, lDim, nCol, 1.0);
}
*/
template <typename T>
__global__ void zeroDiagonalBlocksKernelCuda(T *devM, int lDim, int nCol, int blockSize)
{
int iBlock = blockIdx.x*blockDim.x + threadIdx.x;
int jBlock = blockIdx.y*blockDim.y + threadIdx.y;
if(iBlock<nCol/blockSize)
if(jBlock<nCol/blockSize)
{
int ii=iBlock*blockSize;
int jj=jBlock*blockSize;
for(int i=0; i<::min(blockSize, nCol-ii); i++)
for(int j=0; j<::min(blockSize, nCol-jj); j++)
devM[IDX(ii+i, jj+j, lDim)] = 0.0;
}
}
void transferT0MatrixToGPUCuda(Complex *devT0, LSMSSystemParameters &lsms, LocalTypeInfo &local,
AtomData &atom, int iie, int ispin)
{
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz;
int jsm = kkrsz_ns * kkrsz_ns * ispin;
hipMemcpy(devT0, &local.tmatStore(iie*local.blkSizeTmatStore + jsm,atom.LIZStoreIdx[0]),
kkrsz_ns*kkrsz_ns*sizeof(hipDoubleComplex), hipMemcpyHostToDevice);
#ifdef T_CUDA_DEBUG
std::ofstream myfile;
auto filename = "TAU" + std::to_string(iie) + "_" + std::to_string(ispin);
myfile.open(filename.c_str());
for (int i = 0; i < kkrsz_ns * kkrsz_ns; i++) {
Complex buffer;
hipMemcpy(&buffer, &devT0[i], sizeof(hipDoubleComplex), hipMemcpyDeviceToHost);
myfile << std::real(buffer) << " " << std::imag(buffer) << std::endl;
}
myfile.close();
#endif
}
void transferFullTMatrixToGPUCUDA(Complex *devT, LSMSSystemParameters &lsms, LocalTypeInfo &local,
AtomData &atom, int ispin)
{
int kkrsz_ns = lsms.n_spin_cant * atom.kkrsz;
int nrmat_ns = lsms.n_spin_cant * atom.nrmat;
Matrix <Complex> bigT(nrmat_ns, nrmat_ns);
if (lsms.n_spin_pola == lsms.n_spin_cant) { // non polarized or spin canted
for (int l = 0; l < atom.numLIZ; l++) {
for (int i = 0; i < kkrsz_ns; i++) {
for (int j = 0; j < kkrsz_ns; j++) {
bigT(l*kkrsz_ns + i, l*kkrsz_ns + j) = local.tmatStore(i + j * kkrsz_ns, atom.LIZStoreIdx[l]);
}
}
}
} else {
int jsm = kkrsz_ns * kkrsz_ns * ispin;
for (int l = 0; l < atom.numLIZ; l++) {
for (int i = 0; i < kkrsz_ns; i++) {
for (int j = 0; j < kkrsz_ns; j++) {
bigT(l*kkrsz_ns + i, l*kkrsz_ns + j) = local.tmatStore(i + j * kkrsz_ns + jsm, atom.LIZStoreIdx[l]);
}
}
}
}
/*
std::cout << "00 block of bigT on the CPU" << std::endl;
for (int i = 0; i <kkrsz_ns; i++){
for (int j = 0; j < kkrsz_ns; j++){
std::cout << bigT(i,j) << " ";
}
std::cout << std::endl;
}
*/
hipMemcpy(devT, &bigT(0,0), nrmat_ns*nrmat_ns*sizeof(hipDoubleComplex), hipMemcpyHostToDevice);
}
void transferMatrixToGPUCuda(Complex *devM, Matrix<Complex> &m)
{
hipMemcpy(devM, &m(0,0), m.l_dim()*m.n_col()*sizeof(hipDoubleComplex), hipMemcpyHostToDevice);
}
void transferMatrixFromGPUCuda(Matrix<Complex> &m, hipDoubleComplex *devM)
{
hipMemcpy(&m(0,0), devM, m.l_dim()*m.n_col()*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost);
}
__global__ void copyTMatrixToTauCuda(hipDoubleComplex *tau, hipDoubleComplex *t, int kkrsz, int nrmat)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < kkrsz)
{
for(int j=0; j<kkrsz; j++)
tau[IDX(i,j,nrmat)] = t[IDX(i,j,kkrsz)];
}
}
__global__ void copyBigTMatrixToTauCuda(hipDoubleComplex *tau, hipDoubleComplex *t, int nrmat)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < nrmat)
{
for(int j=0;j<nrmat;j++){
tau[IDX(i,j,nrmat)] = t[IDX(i,j,nrmat)];
}
}
}
__global__ void copyTauToTau00Cuda(hipDoubleComplex *tau00, hipDoubleComplex *tau, int kkrsz, int nrmat)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < kkrsz)
{
for(int j=0; j<kkrsz; j++)
tau00[IDX(i,j,kkrsz)] = tau[IDX(i,j,nrmat)];
}
}
void solveTau00zgetrf_cublas(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM,
Matrix<Complex> &tau00)
{
hipblasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
hipDoubleComplex *Aarray[1], *Barray[1];
hipDoubleComplex *devTau = (hipDoubleComplex *)d.getDevTau();
hipDoubleComplex *devTau00 = (hipDoubleComplex *)d.getDevTau00();
// printf("zero Matrix\n");
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
// printf("copyTMatrixToTau\n");
hipLaunchKernelGGL(( copyTMatrixToTauCuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau, (hipDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
Barray[0] = devTau;
Aarray[0] = (hipDoubleComplex *)devM;
int *ipivArray=d.getDevIpvt();
int *infoArray = d.getDevInfo();
int info;
// printf("hipblasZgetrfBatched\n");
cublasCheckError(hipblasZgetrfBatched(cublasHandle, nrmat_ns, Aarray, nrmat_ns, ipivArray, infoArray, 1));
// printf("hipblasZgetrsBatched\n");
cublasCheckError(hipblasZgetrsBatched(cublasHandle, HIPBLAS_OP_N, nrmat_ns, kkrsz_ns, Aarray, nrmat_ns, ipivArray,
Barray, nrmat_ns, &info, 1));
// copy result into tau00
// printf("copyTauToTau00\n");
hipLaunchKernelGGL(( copyTauToTau00Cuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau00, devTau, kkrsz_ns, nrmat_ns);
deviceCheckError();
// printf("transferMatrixFromGPU\n");
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
void solveTauFullzgetrf_cublas(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Complex *devTauFull)
{
hipblasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant * atom.nrmat;
int kkrsz_ns = lsms.n_spin_cant * atom.kkrsz;
hipDoubleComplex *Aarray[1], *Barray[1];
//hipDoubleComplex *devTauFull = (hipDoubleComplex *)d.getDevTauFull();
zeroMatrixCuda((hipDoubleComplex *)devTauFull, nrmat_ns, nrmat_ns);
deviceCheckError();
hipLaunchKernelGGL(( copyBigTMatrixToTauCuda), dim3(nrmat_ns),dim3(1), 0, 0, (hipDoubleComplex *)devTauFull, (hipDoubleComplex *)tMatrix, nrmat_ns);
deviceCheckError();
Barray[0] = (hipDoubleComplex *) devTauFull;
Aarray[0] = (hipDoubleComplex *) devM;
int *ipivArray=d.getDevIpvt();
int *infoArray = d.getDevInfo();
int info;
cublasCheckError(hipblasZgetrfBatched(cublasHandle, nrmat_ns, Aarray, nrmat_ns, ipivArray, infoArray, 1));
// printf("hipblasZgetrsBatched\n");
cublasCheckError(hipblasZgetrsBatched(cublasHandle, HIPBLAS_OP_N, nrmat_ns, nrmat_ns, Aarray, nrmat_ns, ipivArray,
Barray, nrmat_ns, &info, 1));
//transferMatrixFromGPUCuda(tau, devTauFull);
deviceCheckError();
}
#ifndef ARCH_IBM
void solveTau00zzgesv_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
hipsolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
hipDoubleComplex *devTau = (hipDoubleComplex *)d.getDevTau();
hipDoubleComplex *devTau00 = (hipDoubleComplex *)d.getDevTau00();
hipDoubleComplex *devWork = (hipDoubleComplex *)d.getDevWork();
hipDoubleComplex *devT = (hipDoubleComplex *)d.getDevT();
int *devIpiv = d.getDevIpvt();
int devInfo[1]; // d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
zeroMatrixCuda(devT, nrmat_ns, kkrsz_ns);
hipLaunchKernelGGL(( copyTMatrixToTauCuda), dim3(kkrsz_ns),dim3(1), 0, 0, devT, (hipDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
int iter;
cusolverStatus_t status = cusolverDnZZgesv(cusolverDnHandle, nrmat_ns, kkrsz_ns,
(hipDoubleComplex *)devM, nrmat_ns, devIpiv, devT, nrmat_ns, devTau, nrmat_ns,
devWork, d.getDevWorkBytes(), &iter, devInfo);
if(status!=CUSOLVER_STATUS_SUCCESS)
{
printf("cusolverDnZZgesv returned %d\n",status);
}
hipLaunchKernelGGL(( copyTauToTau00Cuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau00, devTau, kkrsz_ns, nrmat_ns);
transferMatrixFromGPUCuda(tau00, devTau00);
}
#endif
void solveTau00zgetrf_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
hipsolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
hipDoubleComplex *devTau = (hipDoubleComplex *)d.getDevTau();
hipDoubleComplex *devTau00 = (hipDoubleComplex *)d.getDevTau00();
hipDoubleComplex *devWork = (hipDoubleComplex *)d.getDevWork();
int *devIpiv = d.getDevIpvt();
int *devInfo = d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
hipLaunchKernelGGL(( copyTMatrixToTauCuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau, (hipDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
cusolverCheckError(hipsolverDnZgetrf(cusolverDnHandle, nrmat_ns, nrmat_ns,
(hipDoubleComplex *)devM, nrmat_ns, devWork, devIpiv,
devInfo ));
cusolverCheckError(hipsolverDnZgetrs(cusolverDnHandle, HIPBLAS_OP_N, nrmat_ns, kkrsz_ns,
(hipDoubleComplex *)devM, nrmat_ns, devIpiv, devTau, nrmat_ns, devInfo));
// copy result into tau00
hipLaunchKernelGGL(( copyTauToTau00Cuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau00, devTau, kkrsz_ns, nrmat_ns);
deviceCheckError();
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
void solveTauFullzgetrf_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Complex *devTauFull, int ispin)
{
hipsolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
int *devIpiv = d.getDevIpvt();
int *devInfo = d.getDevInfo();
hipDoubleComplex *devWork = (hipDoubleComplex *)d.getDevWork();
zeroMatrixCuda(devTauFull, nrmat_ns, nrmat_ns);
deviceCheckError();
hipLaunchKernelGGL(( copyBigTMatrixToTauCuda), dim3(nrmat_ns),dim3(1), 0, 0, (hipDoubleComplex *)devTauFull, (hipDoubleComplex *)tMatrix, nrmat_ns);
deviceCheckError();
cusolverCheckError(hipsolverDnZgetrf(cusolverDnHandle, nrmat_ns, nrmat_ns,
(hipDoubleComplex *)devM, nrmat_ns, devWork, devIpiv,
devInfo ));
//std::cout << nrmat_ns << " " << std::endl;
//printf(" %p %p %p %p\n", devM, devTauFull, devIpiv, devInfo);
cusolverCheckError(hipsolverDnZgetrs(cusolverDnHandle, HIPBLAS_OP_N, nrmat_ns, nrmat_ns,
(hipDoubleComplex *)devM, nrmat_ns, devIpiv, (hipDoubleComplex *)devTauFull, nrmat_ns, devInfo));
//transferMatrixFromGPUCuda(tau, devTauFull);
deviceCheckError();
}
#ifdef USE_XGETRF
void solveTau00Xgetrf_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
hipsolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
hipsolverDnParams_t cusolverDnParams = DeviceStorage::getCusolverParams();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
hipDoubleComplex *devTau = (hipDoubleComplex *)d.getDevTau();
hipDoubleComplex *devTau00 = (hipDoubleComplex *)d.getDevTau00();
void *devWork = d.getDevWork();
size_t devWorkBytes = d.getDevWorkBytes();
int64_t *devIpiv=d.getDevIpvt64();
void *hostWork = d.getHostWork();
size_t hostWorkBytes = d.getHostWorkBytes();
int *devInfo = d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
hipLaunchKernelGGL(( copyTMatrixToTauCuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau, (hipDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
cusolverCheckError(cusolverDnXgetrf(cusolverDnHandle,
cusolverDnParams,
(int64_t)nrmat_ns,
(int64_t)nrmat_ns,
HIP_C_64F,
(hipDoubleComplex *)devM,
(int64_t)nrmat_ns,
devIpiv,
HIP_C_64F,
devWork,
devWorkBytes,
hostWork,
hostWorkBytes,
devInfo));
cusolverCheckError(cusolverDnXgetrs(cusolverDnHandle,
cusolverDnParams,
HIPBLAS_OP_N,
(int64_t)nrmat_ns,
(int64_t)kkrsz_ns,
HIP_C_64F,
(hipDoubleComplex *)devM,
(int64_t)nrmat_ns,
devIpiv,
HIP_C_64F,
devTau,
(int64_t)nrmat_ns,
devInfo));
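  // The Xgetrf/Xgetrs generic API works with 64-bit indices (int64_t pivots) and explicit
  // HIP_C_64F type tags, and takes both a device and a host workspace; the workspace sizes
  // are assumed to have been obtained from the matching bufferSize query when DeviceStorage
  // allocated devWork and hostWork.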
// copy result into tau00
hipLaunchKernelGGL(( copyTauToTau00Cuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau00, devTau, kkrsz_ns, nrmat_ns);
deviceCheckError();
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
#endif
#ifdef USE_IRSXGESV
void solveTau00IRSXgesv_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
hipsolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
cusolverDnIRSParams_t cusolverDnIRSParams = DeviceStorage::getCusolverIRSParams();
cusolverDnIRSInfos_t cusolverDnIRSInfo = DeviceStorage::getCusolverIRSInfo();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
hipDoubleComplex *devTau = (hipDoubleComplex *)d.getDevTau();
hipDoubleComplex *devTau00 = (hipDoubleComplex *)d.getDevTau00();
hipDoubleComplex *devWork = (hipDoubleComplex *)d.getDevWork();
hipDoubleComplex *devX = (hipDoubleComplex *)d.getDevX();
size_t devWorkBytes = d.getDevWorkBytes();
int *devInfo = d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
hipLaunchKernelGGL(( copyTMatrixToTauCuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau, (hipDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
cusolverDnIRSInfos_t info;
int niters;
cusolverCheckError(cusolverDnIRSXgesv(cusolverDnHandle,
cusolverDnIRSParams,
cusolverDnIRSInfo,
nrmat_ns,
kkrsz_ns,
(hipDoubleComplex *)devM,
nrmat_ns,
devTau,
nrmat_ns,
devX,
nrmat_ns,
devWork,
devWorkBytes,
&niters,
devInfo));
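  // The IRS (iterative refinement) solver writes the solution into devX rather than devTau,
  // which is why the 00 block is copied out of devX below; niters reports the number of
  // refinement iterations that were performed.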
// copy result into tau00
hipLaunchKernelGGL(( copyTauToTau00Cuda), dim3(kkrsz_ns),dim3(1), 0, 0, devTau00, devX, kkrsz_ns, nrmat_ns);
deviceCheckError();
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
#endif
|
9dc93d9cb1de9aa23c531d6441cb2239095e524a.cu
|
/* -*- mode: C++; c-file-style: "bsd"; c-basic-offset: 2; indent-tabs-mode: nil -*- */
#include "linearSolvers.hpp"
#include <stdio.h>
#include <vector>
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "Complex.hpp"
#include "Matrix.hpp"
#include "Accelerator/DeviceStorage.hpp"
#include "Accelerator/deviceCheckError.hpp"
/*
#define IDX(i, j, lDim) (((j)*(lDim))+(i))
template <typename T>
void zeroMatrixCuda(T *devM, int lDim, int nCol)
{
// for(int i=0; i<m.n_row(); i++)
// for(int j=0; j<m.n_col(); j++)
// m(i,j) = 0.0;
cudaMemset(devM, 0, lDim*nCol*sizeof(T));
}
template <typename T>
__global__ void setDiagonalKernelCuda(T *devM, int lDim, int nCol, T val)
{
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<nCol)
{
devM[IDX(i, i, lDim)] = val;
}
}
template <typename T>
__global__ void addDiagonalKernelCuda(T *devM, int lDim, int nCol, T val)
{
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<nCol)
{
devM[IDX(i, i, lDim)] = cuCadd(devM[IDX(i, i, lDim)], val);
}
}
template <typename T>
void unitMatrixCuda(T *devM, int lDim, int nCol)
{
zeroMatrixCuda(devM, lDim, nCol);
setDiagonalKernelCuda<<<nCol,1>>>(devM, lDim, nCol, 1.0);
}
*/
template <typename T>
__global__ void zeroDiagonalBlocksKernelCuda(T *devM, int lDim, int nCol, int blockSize)
{
int iBlock = blockIdx.x*blockDim.x + threadIdx.x;
int jBlock = blockIdx.y*blockDim.y + threadIdx.y;
if(iBlock<nCol/blockSize)
if(jBlock<nCol/blockSize)
{
int ii=iBlock*blockSize;
int jj=jBlock*blockSize;
for(int i=0; i<std::min(blockSize, nCol-ii); i++)
for(int j=0; j<std::min(blockSize, nCol-jj); j++)
devM[IDX(ii+i, jj+j, lDim)] = 0.0;
}
}
void transferT0MatrixToGPUCuda(Complex *devT0, LSMSSystemParameters &lsms, LocalTypeInfo &local,
AtomData &atom, int iie, int ispin)
{
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz;
int jsm = kkrsz_ns * kkrsz_ns * ispin;
cudaMemcpy(devT0, &local.tmatStore(iie*local.blkSizeTmatStore + jsm,atom.LIZStoreIdx[0]),
kkrsz_ns*kkrsz_ns*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
#ifdef T_CUDA_DEBUG
std::ofstream myfile;
auto filename = "TAU" + std::to_string(iie) + "_" + std::to_string(ispin);
myfile.open(filename.c_str());
for (int i = 0; i < kkrsz_ns * kkrsz_ns; i++) {
Complex buffer;
cudaMemcpy(&buffer, &devT0[i], sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
myfile << std::real(buffer) << " " << std::imag(buffer) << std::endl;
}
myfile.close();
#endif
}
void transferFullTMatrixToGPUCUDA(Complex *devT, LSMSSystemParameters &lsms, LocalTypeInfo &local,
AtomData &atom, int ispin)
{
int kkrsz_ns = lsms.n_spin_cant * atom.kkrsz;
int nrmat_ns = lsms.n_spin_cant * atom.nrmat;
Matrix <Complex> bigT(nrmat_ns, nrmat_ns);
if (lsms.n_spin_pola == lsms.n_spin_cant) { // non polarized or spin canted
for (int l = 0; l < atom.numLIZ; l++) {
for (int i = 0; i < kkrsz_ns; i++) {
for (int j = 0; j < kkrsz_ns; j++) {
bigT(l*kkrsz_ns + i, l*kkrsz_ns + j) = local.tmatStore(i + j * kkrsz_ns, atom.LIZStoreIdx[l]);
}
}
}
} else {
int jsm = kkrsz_ns * kkrsz_ns * ispin;
for (int l = 0; l < atom.numLIZ; l++) {
for (int i = 0; i < kkrsz_ns; i++) {
for (int j = 0; j < kkrsz_ns; j++) {
bigT(l*kkrsz_ns + i, l*kkrsz_ns + j) = local.tmatStore(i + j * kkrsz_ns + jsm, atom.LIZStoreIdx[l]);
}
}
}
}
/*
std::cout << "00 block of bigT on the CPU" << std::endl;
for (int i = 0; i <kkrsz_ns; i++){
for (int j = 0; j < kkrsz_ns; j++){
std::cout << bigT(i,j) << " ";
}
std::cout << std::endl;
}
*/
cudaMemcpy(devT, &bigT(0,0), nrmat_ns*nrmat_ns*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
}
void transferMatrixToGPUCuda(Complex *devM, Matrix<Complex> &m)
{
cudaMemcpy(devM, &m(0,0), m.l_dim()*m.n_col()*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
}
void transferMatrixFromGPUCuda(Matrix<Complex> &m, cuDoubleComplex *devM)
{
cudaMemcpy(&m(0,0), devM, m.l_dim()*m.n_col()*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
}
__global__ void copyTMatrixToTauCuda(cuDoubleComplex *tau, cuDoubleComplex *t, int kkrsz, int nrmat)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < kkrsz)
{
for(int j=0; j<kkrsz; j++)
tau[IDX(i,j,nrmat)] = t[IDX(i,j,kkrsz)];
}
}
__global__ void copyBigTMatrixToTauCuda(cuDoubleComplex *tau, cuDoubleComplex *t, int nrmat)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < nrmat)
{
for(int j=0;j<nrmat;j++){
tau[IDX(i,j,nrmat)] = t[IDX(i,j,nrmat)];
}
}
}
__global__ void copyTauToTau00Cuda(cuDoubleComplex *tau00, cuDoubleComplex *tau, int kkrsz, int nrmat)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < kkrsz)
{
for(int j=0; j<kkrsz; j++)
tau00[IDX(i,j,kkrsz)] = tau[IDX(i,j,nrmat)];
}
}
void solveTau00zgetrf_cublas(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM,
Matrix<Complex> &tau00)
{
cublasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
cuDoubleComplex *Aarray[1], *Barray[1];
cuDoubleComplex *devTau = (cuDoubleComplex *)d.getDevTau();
cuDoubleComplex *devTau00 = (cuDoubleComplex *)d.getDevTau00();
// printf("zero Matrix\n");
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
// printf("copyTMatrixToTau\n");
copyTMatrixToTauCuda<<<kkrsz_ns,1>>>(devTau, (cuDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
Barray[0] = devTau;
Aarray[0] = (cuDoubleComplex *)devM;
int *ipivArray=d.getDevIpvt();
int *infoArray = d.getDevInfo();
int info;
// printf("cublasZgetrfBatched\n");
cublasCheckError(cublasZgetrfBatched(cublasHandle, nrmat_ns, Aarray, nrmat_ns, ipivArray, infoArray, 1));
// printf("cublasZgetrsBatched\n");
cublasCheckError(cublasZgetrsBatched(cublasHandle, CUBLAS_OP_N, nrmat_ns, kkrsz_ns, Aarray, nrmat_ns, ipivArray,
Barray, nrmat_ns, &info, 1));
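  // The batched LU interface is used here with a batch count of 1 so the existing cuBLAS
  // handle can be reused; note that getrfBatched reports per-matrix status in the device
  // array infoArray, while getrsBatched returns its status in the host variable info.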
// copy result into tau00
// printf("copyTauToTau00\n");
copyTauToTau00Cuda<<<kkrsz_ns,1>>>(devTau00, devTau, kkrsz_ns, nrmat_ns);
deviceCheckError();
// printf("transferMatrixFromGPU\n");
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
void solveTauFullzgetrf_cublas(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Complex *devTauFull)
{
cublasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant * atom.nrmat;
int kkrsz_ns = lsms.n_spin_cant * atom.kkrsz;
cuDoubleComplex *Aarray[1], *Barray[1];
//cuDoubleComplex *devTauFull = (cuDoubleComplex *)d.getDevTauFull();
zeroMatrixCuda((cuDoubleComplex *)devTauFull, nrmat_ns, nrmat_ns);
deviceCheckError();
copyBigTMatrixToTauCuda<<<nrmat_ns,1>>>((cuDoubleComplex *)devTauFull, (cuDoubleComplex *)tMatrix, nrmat_ns);
deviceCheckError();
Barray[0] = (cuDoubleComplex *) devTauFull;
Aarray[0] = (cuDoubleComplex *) devM;
int *ipivArray=d.getDevIpvt();
int *infoArray = d.getDevInfo();
int info;
cublasCheckError(cublasZgetrfBatched(cublasHandle, nrmat_ns, Aarray, nrmat_ns, ipivArray, infoArray, 1));
// printf("cublasZgetrsBatched\n");
cublasCheckError(cublasZgetrsBatched(cublasHandle, CUBLAS_OP_N, nrmat_ns, nrmat_ns, Aarray, nrmat_ns, ipivArray,
Barray, nrmat_ns, &info, 1));
//transferMatrixFromGPUCuda(tau, devTauFull);
deviceCheckError();
}
#ifndef ARCH_IBM
void solveTau00zzgesv_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
cusolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
cuDoubleComplex *devTau = (cuDoubleComplex *)d.getDevTau();
cuDoubleComplex *devTau00 = (cuDoubleComplex *)d.getDevTau00();
cuDoubleComplex *devWork = (cuDoubleComplex *)d.getDevWork();
cuDoubleComplex *devT = (cuDoubleComplex *)d.getDevT();
int *devIpiv = d.getDevIpvt();
int devInfo[1]; // d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
zeroMatrixCuda(devT, nrmat_ns, kkrsz_ns);
copyTMatrixToTauCuda<<<kkrsz_ns,1>>>(devT, (cuDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
int iter;
cusolverStatus_t status = cusolverDnZZgesv(cusolverDnHandle, nrmat_ns, kkrsz_ns,
(cuDoubleComplex *)devM, nrmat_ns, devIpiv, devT, nrmat_ns, devTau, nrmat_ns,
devWork, d.getDevWorkBytes(), &iter, devInfo);
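  // cusolverDnZZgesv is the mixed-precision solver with iterative refinement; on exit iter
  // should hold the number of refinement iterations (a negative value normally indicates a
  // fallback to the plain double-complex factorization).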
if(status!=CUSOLVER_STATUS_SUCCESS)
{
printf("cusolverDnZZgesv returned %d\n",status);
}
copyTauToTau00Cuda<<<kkrsz_ns,1>>>(devTau00, devTau, kkrsz_ns, nrmat_ns);
transferMatrixFromGPUCuda(tau00, devTau00);
}
#endif
void solveTau00zgetrf_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
cusolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
cuDoubleComplex *devTau = (cuDoubleComplex *)d.getDevTau();
cuDoubleComplex *devTau00 = (cuDoubleComplex *)d.getDevTau00();
cuDoubleComplex *devWork = (cuDoubleComplex *)d.getDevWork();
int *devIpiv = d.getDevIpvt();
int *devInfo = d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
copyTMatrixToTauCuda<<<kkrsz_ns,1>>>(devTau, (cuDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
cusolverCheckError(cusolverDnZgetrf(cusolverDnHandle, nrmat_ns, nrmat_ns,
(cuDoubleComplex *)devM, nrmat_ns, devWork, devIpiv,
devInfo ));
cusolverCheckError(cusolverDnZgetrs(cusolverDnHandle, CUBLAS_OP_N, nrmat_ns, kkrsz_ns,
(cuDoubleComplex *)devM, nrmat_ns, devIpiv, devTau, nrmat_ns, devInfo));
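  // devInfo stays on the device here; if factorization failures need to be detected, one
  // could copy it back and test it, e.g. (illustrative sketch, not part of the original flow):
  //   int hostInfo = 0;
  //   cudaMemcpy(&hostInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
  //   if (hostInfo != 0) printf("Zgetrf reported info = %d\n", hostInfo);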
// copy result into tau00
copyTauToTau00Cuda<<<kkrsz_ns,1>>>(devTau00, devTau, kkrsz_ns, nrmat_ns);
deviceCheckError();
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
void solveTauFullzgetrf_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Complex *devTauFull, int ispin)
{
cusolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
int *devIpiv = d.getDevIpvt();
int *devInfo = d.getDevInfo();
cuDoubleComplex *devWork = (cuDoubleComplex *)d.getDevWork();
zeroMatrixCuda(devTauFull, nrmat_ns, nrmat_ns);
deviceCheckError();
copyBigTMatrixToTauCuda<<<nrmat_ns,1>>>((cuDoubleComplex *)devTauFull, (cuDoubleComplex *)tMatrix, nrmat_ns);
deviceCheckError();
cusolverCheckError(cusolverDnZgetrf(cusolverDnHandle, nrmat_ns, nrmat_ns,
(cuDoubleComplex *)devM, nrmat_ns, devWork, devIpiv,
devInfo ));
//std::cout << nrmat_ns << " " << std::endl;
//printf(" %p %p %p %p\n", devM, devTauFull, devIpiv, devInfo);
cusolverCheckError(cusolverDnZgetrs(cusolverDnHandle, CUBLAS_OP_N, nrmat_ns, nrmat_ns,
(cuDoubleComplex *)devM, nrmat_ns, devIpiv, (cuDoubleComplex *)devTauFull, nrmat_ns, devInfo));
//transferMatrixFromGPUCuda(tau, devTauFull);
deviceCheckError();
}
#ifdef USE_XGETRF
void solveTau00Xgetrf_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
cusolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
cusolverDnParams_t cusolverDnParams = DeviceStorage::getCusolverParams();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
cuDoubleComplex *devTau = (cuDoubleComplex *)d.getDevTau();
cuDoubleComplex *devTau00 = (cuDoubleComplex *)d.getDevTau00();
void *devWork = d.getDevWork();
size_t devWorkBytes = d.getDevWorkBytes();
int64_t *devIpiv=d.getDevIpvt64();
void *hostWork = d.getHostWork();
size_t hostWorkBytes = d.getHostWorkBytes();
int *devInfo = d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
copyTMatrixToTauCuda<<<kkrsz_ns,1>>>(devTau, (cuDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
cusolverCheckError(cusolverDnXgetrf(cusolverDnHandle,
cusolverDnParams,
(int64_t)nrmat_ns,
(int64_t)nrmat_ns,
CUDA_C_64F,
(cuDoubleComplex *)devM,
(int64_t)nrmat_ns,
devIpiv,
CUDA_C_64F,
devWork,
devWorkBytes,
hostWork,
hostWorkBytes,
devInfo));
cusolverCheckError(cusolverDnXgetrs(cusolverDnHandle,
cusolverDnParams,
CUBLAS_OP_N,
(int64_t)nrmat_ns,
(int64_t)kkrsz_ns,
CUDA_C_64F,
(cuDoubleComplex *)devM,
(int64_t)nrmat_ns,
devIpiv,
CUDA_C_64F,
devTau,
(int64_t)nrmat_ns,
devInfo));
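  // As in the 32-bit path, getrf factors devM in place and getrs solves for the kkrsz_ns
  // columns in devTau; the 64-bit generic API additionally needs the CUDA_C_64F type tags,
  // int64_t pivots, and separate device/host workspaces sized by the matching bufferSize
  // query (assumed to have been done when DeviceStorage allocated devWork and hostWork).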
// copy result into tau00
copyTauToTau00Cuda<<<kkrsz_ns,1>>>(devTau00, devTau, kkrsz_ns, nrmat_ns);
deviceCheckError();
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
#endif
#ifdef USE_IRSXGESV
void solveTau00IRSXgesv_cusolver(LSMSSystemParameters &lsms, LocalTypeInfo &local, DeviceStorage &d, AtomData &atom,
Complex *tMatrix, Complex *devM, Matrix<Complex> &tau00, int ispin)
{
cusolverDnHandle_t cusolverDnHandle = DeviceStorage::getCusolverDnHandle();
cusolverDnIRSParams_t cusolverDnIRSParams = DeviceStorage::getCusolverIRSParams();
cusolverDnIRSInfos_t cusolverDnIRSInfo = DeviceStorage::getCusolverIRSInfo();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
// reference algorithm. Use LU factorization and linear solve for dense matrices in LAPACK
cuDoubleComplex *devTau = (cuDoubleComplex *)d.getDevTau();
cuDoubleComplex *devTau00 = (cuDoubleComplex *)d.getDevTau00();
cuDoubleComplex *devWork = (cuDoubleComplex *)d.getDevWork();
cuDoubleComplex *devX = (cuDoubleComplex *)d.getDevX();
size_t devWorkBytes = d.getDevWorkBytes();
int *devInfo = d.getDevInfo();
zeroMatrixCuda(devTau, nrmat_ns, kkrsz_ns);
deviceCheckError();
copyTMatrixToTauCuda<<<kkrsz_ns,1>>>(devTau, (cuDoubleComplex *)tMatrix, kkrsz_ns, nrmat_ns);
deviceCheckError();
cusolverDnIRSInfos_t info;
int niters;
cusolverCheckError(cusolverDnIRSXgesv(cusolverDnHandle,
cusolverDnIRSParams,
cusolverDnIRSInfo,
nrmat_ns,
kkrsz_ns,
(cuDoubleComplex *)devM,
nrmat_ns,
devTau,
nrmat_ns,
devX,
nrmat_ns,
devWork,
devWorkBytes,
&niters,
devInfo));
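  // With the IRS interface the refined solution is returned in devX (devTau only provides
  // the right-hand sides), so the 00 block below is copied out of devX; niters reports how
  // many refinement iterations were performed.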
// copy result into tau00
copyTauToTau00Cuda<<<kkrsz_ns,1>>>(devTau00, devX, kkrsz_ns, nrmat_ns);
deviceCheckError();
transferMatrixFromGPUCuda(tau00, devTau00);
deviceCheckError();
}
#endif
|
954d30314d66b3e0b07683d9a0d3ca70976c6193.hip
|
// !!! This is a file automatically generated by hipify!!!
/* PAPI Multiple GPU example. This example is taken from the NVIDIA
* documentation (Copyright 1993-2013 NVIDIA Corporation) and has been
* adapted to show the use of CUPTI and PAPI in collecting event
* counters for multiple GPU contexts. PAPI Team (2015)
*/
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
 * application. On the other hand, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cupti.h>
#include <timer.h>
#include "papi_test.h"
#if not defined PAPI
#undef PAPI
#endif
#if not defined CUPTI_ONLY
#undef CUPTI_ONLY
#endif
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
// //////////////////////////////////////////////////////////////////////////////
// Data configuration
// //////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 48576 * 32;
#ifdef PAPI
const int MAX_NUM_EVENTS = 32;
#endif
#define CHECK_CU_ERROR(err, cufunc) \
if (err != hipSuccess) { printf ("Error %d for CUDA Driver API function '%s'\n", err, cufunc); return -1; }
#define CHECK_CUDA_ERROR(err) \
if (err != hipSuccess) { printf ("Error %d for CUDA \n", err ); return -1; }
#define CHECK_CUPTI_ERROR(err, cuptifunc) \
if (err != CUPTI_SUCCESS) { printf ("Error %d for CUPTI API function '%s'\n", err, cuptifunc); return -1; }
// //////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
// //////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel( float *d_Result, float *d_Input, int N )
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for( int pos = tid; pos < N; pos += threadN )
sum += d_Input[pos];
d_Result[tid] = sum;
}
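// Each of the gridDim.x*blockDim.x threads strides over its GPU's chunk and leaves one
// partial sum in d_Result; the final reduction over these ACCUM_N partials is done on the
// host after the asynchronous copy back.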
// //////////////////////////////////////////////////////////////////////////////
// Program main
// //////////////////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
// Solver config
TGPUplan plan[MAX_GPU_COUNT];
// GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf( "Starting simpleMultiGPU\n" );
// Report on the available CUDA devices
int computeCapabilityMajor = 0, computeCapabilityMinor = 0;
int runtimeVersion = 0, driverVersion = 0;
int deviceNum = -1;
char deviceName[32];
hipDevice_t dev;
CHECK_CUDA_ERROR( hipGetDeviceCount( &GPU_N ) );
if( GPU_N > MAX_GPU_COUNT ) GPU_N = MAX_GPU_COUNT;
printf( "CUDA-capable device count: %i\n", GPU_N );
for ( deviceNum=0; deviceNum<GPU_N; deviceNum++ ) {
CHECK_CU_ERROR( hipDeviceGet( &dev, deviceNum ), "hipDeviceGet" );
CHECK_CU_ERROR( hipDeviceGetName( deviceName, 32, dev ), "hipDeviceGetName" );
CHECK_CU_ERROR( hipDeviceComputeCapability( &computeCapabilityMajor, &computeCapabilityMinor, dev ), "hipDeviceComputeCapability" );
hipRuntimeGetVersion( &runtimeVersion );
hipDriverGetVersion( &driverVersion );
printf( "CUDA Device %d: %s : computeCapability %d.%d runtimeVersion %d.%d driverVersion %d.%d\n", deviceNum, deviceName, computeCapabilityMajor, computeCapabilityMinor, runtimeVersion/1000, (runtimeVersion%100)/10, driverVersion/1000, (driverVersion%100)/10 );
if ( computeCapabilityMajor < 2 ) {
printf( "CUDA Device %d compute capability is too low... will not add any more GPUs\n", deviceNum );
GPU_N = deviceNum;
break;
}
}
uint32_t cupti_linked_version;
cuptiGetVersion( &cupti_linked_version );
printf("CUPTI version: Compiled against version %d; Linked against version %d\n", CUPTI_API_VERSION, cupti_linked_version );
printf( "Generating input data...\n" );
// Subdividing input data across GPUs
// Get data sizes for each GPU
for( i = 0; i < GPU_N; i++ )
plan[i].dataN = DATA_N / GPU_N;
// Take into account "odd" data sizes
for( i = 0; i < DATA_N % GPU_N; i++ )
plan[i].dataN++;
// Assign data ranges to GPUs
gpuBase = 0;
for( i = 0; i < GPU_N; i++ ) {
plan[i].h_Sum = h_SumGPU + i; // point within h_SumGPU array
gpuBase += plan[i].dataN;
}
// Create streams for issuing GPU commands asynchronously and allocate memory (GPU and System page-locked)
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
// hipFree: forces creation of a context
CHECK_CUDA_ERROR( hipFree( NULL ) );
CHECK_CUDA_ERROR( hipStreamCreate( &plan[i].stream ) );
// Allocate memory
CHECK_CUDA_ERROR( hipMalloc( ( void ** ) &plan[i].d_Data, plan[i].dataN * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipMalloc( ( void ** ) &plan[i].d_Sum, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipHostMalloc( ( void ** ) &plan[i].h_Sum_from_device, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipHostMalloc( ( void ** ) &plan[i].h_Data, plan[i].dataN * sizeof( float ) ) );
for( j = 0; j < plan[i].dataN; j++ ) {
plan[i].h_Data[j] = ( float ) rand() / ( float ) RAND_MAX;
}
}
#ifdef CUPTI_ONLY
printf("Setup CUPTI counters internally for elapsed_cycles_sm event (CUPTI_ONLY)\n");
hipDevice_t device[MAX_GPU_COUNT];
hipCtx_t ctx[MAX_GPU_COUNT];
hipCtx_t ctxpopped[MAX_GPU_COUNT];
CUpti_EventGroup eg[MAX_GPU_COUNT];
CUpti_EventID myevent;//elapsed cycles
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR( hipDeviceGet( &device[i], i ), "hipDeviceGet" );
CHECK_CU_ERROR( hipCtxCreate( &ctx[i], 0, device[i] ), "hipCtxCreate" );
CHECK_CUPTI_ERROR( cuptiEventGroupCreate( ctx[i], &eg[i], 0 ), "cuptiEventGroupCreate" );
cuptiEventGetIdFromName ( device[i], "elapsed_cycles_sm", &myevent );
CHECK_CUPTI_ERROR( cuptiEventGroupAddEvent( eg[i], myevent ), "cuptiEventGroupAddEvent" );
CHECK_CUPTI_ERROR( cuptiEventGroupEnable( eg[i] ), "cuptiEventGroupEnable" );
CHECK_CU_ERROR( cuCtxPopCurrent( &ctxpopped[i] ), "cuCtxPopCurrent" );
}
#endif
#ifdef PAPI
printf("Setup PAPI counters internally (PAPI)\n");
int EventSet = PAPI_NULL;
int NUM_EVENTS = MAX_GPU_COUNT*MAX_NUM_EVENTS;
long long values[NUM_EVENTS];
int eventCount;
int retval, gg, ee;
/* PAPI Initialization */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if( retval != PAPI_VER_CURRENT ) fprintf( stderr, "PAPI_library_init failed\n" );
printf( "PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR( PAPI_VERSION ), PAPI_VERSION_MINOR( PAPI_VERSION ), PAPI_VERSION_REVISION( PAPI_VERSION ) );
retval = PAPI_create_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_create_eventset failed\n" );
// In this example measure 2 events from each GPU
int numEventEndings = 2;
static char *EventEndings[] = { (char*)"inst_executed", (char *)"elapsed_cycles_sm" };
// Add events at a GPU specific level ... eg cuda:::device:2:elapsed_cycles_sm
char *EventName[NUM_EVENTS];
char tmpEventName[50];
eventCount = 0;
for( gg = 0; gg < GPU_N; gg++ ) {
CHECK_CUDA_ERROR( hipSetDevice( gg ) ); // Set device
for ( ee=0; ee<numEventEndings; ee++ ) {
snprintf( tmpEventName, 50, "cuda:::device:%d:%s", gg, EventEndings[ee] );
printf( "Trying to add event %s to GPU %d in PAPI...", tmpEventName , gg );
retval = PAPI_add_named_event( EventSet, tmpEventName );
if (retval==PAPI_OK) {
printf( "Added event\n" );
EventName[eventCount] = (char *)calloc( 50, sizeof(char) );
snprintf( EventName[eventCount], 50, "%s", tmpEventName );
eventCount++;
} else {
printf( "Could not add event\n" );
}
}
}
// Start PAPI event measurement
retval = PAPI_start( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_start failed\n" );
#endif
// Start timing and compute on GPU(s)
printf( "Computing with %d GPUs...\n", GPU_N );
StartTimer();
// Copy data to GPU, launch the kernel and copy data back. All asynchronously
for( i = GPU_N-1; i >= 0; i-- ) {
// Set device
CHECK_CUDA_ERROR( hipSetDevice( i ) );
//AYK CHECK_CUPTI_ERROR( cuptiEventGroupResetAllEvents ( eg[i] ), "cuptiEventGroupResetAllEvents" );
// Copy input data from CPU
CHECK_CUDA_ERROR( hipMemcpyAsync( plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof( float ), hipMemcpyHostToDevice, plan[i].stream ) );
// Perform GPU computations
hipLaunchKernelGGL(( reduceKernel) , dim3(BLOCK_N), dim3(THREAD_N), 0, plan[i].stream , plan[i].d_Sum, plan[i].d_Data, plan[i].dataN );
if ( hipGetLastError() != hipSuccess ) { printf( "reduceKernel() execution failed (GPU %d).\n", i ); exit(EXIT_FAILURE); }
// Read back GPU results
CHECK_CUDA_ERROR( hipMemcpyAsync( plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N * sizeof( float ), hipMemcpyDeviceToHost, plan[i].stream ) );
}
// Process GPU results
printf( "Process GPU results on %d GPUs...\n", GPU_N );
for( i = 0; i < GPU_N; i++ ) {
float sum;
// Set device
CHECK_CUDA_ERROR( hipSetDevice( i ) );
// Wait for all operations to finish
hipStreamSynchronize( plan[i].stream );
// Finalize GPU reduction for current subvector
sum = 0;
for( j = 0; j < ACCUM_N; j++ ) {
sum += plan[i].h_Sum_from_device[j];
}
*( plan[i].h_Sum ) = ( float ) sum;
}
double gpuTime = GetTimer();
#ifdef CUPTI_ONLY
size_t size = 1024;
uint64_t buffer[1024];
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR( hipCtxSynchronize( ), "hipCtxSynchronize" );
CHECK_CUPTI_ERROR( cuptiEventGroupReadEvent ( eg[i], CUPTI_EVENT_READ_FLAG_NONE, myevent, &size, &buffer[i] ), "cuptiEventGroupReadEvent" );
printf( "CUPTI elapsed_cycles_sm device %d counterValue %u\n", i, buffer[i] );
}
#endif
#ifdef PAPI
retval = PAPI_stop( EventSet, values );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_stop failed\n" );
for( i = 0; i < eventCount; i++ )
printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
retval = PAPI_cleanup_eventset( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_cleanup_eventset failed\n" );
retval = PAPI_destroy_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_destroy_eventset failed\n" );
PAPI_shutdown();
#endif
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipHostFree( plan[i].h_Sum_from_device ) );
CHECK_CUDA_ERROR( hipFree( plan[i].d_Sum ) );
CHECK_CUDA_ERROR( hipFree( plan[i].d_Data ) );
// Shut down this GPU
CHECK_CUDA_ERROR( hipStreamDestroy( plan[i].stream ) );
}
sumGPU = 0;
for( i = 0; i < GPU_N; i++ ) {
sumGPU += h_SumGPU[i];
}
printf( " GPU Processing time: %f (ms)\n", gpuTime );
// Compute on Host CPU
printf( "Computing the same result with Host CPU...\n" );
StartTimer();
sumCPU = 0;
for( i = 0; i < GPU_N; i++ ) {
for( j = 0; j < plan[i].dataN; j++ ) {
sumCPU += plan[i].h_Data[j];
}
}
double cpuTime = GetTimer();
printf( " CPU Processing time: %f (ms)\n", cpuTime );
// Compare GPU and CPU results
printf( "Comparing GPU and Host CPU results...\n" );
diff = fabs( sumCPU - sumGPU ) / fabs( sumCPU );
printf( " GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU );
printf( " Relative difference: %E \n", diff );
// Cleanup and shutdown
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CUDA_ERROR( hipHostFree( plan[i].h_Data ) );
hipDeviceReset();
}
exit( ( diff < 1e-5 ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
|
954d30314d66b3e0b07683d9a0d3ca70976c6193.cu
|
/* PAPI Multiple GPU example. This example is taken from the NVIDIA
* documentation (Copyright 1993-2013 NVIDIA Corporation) and has been
* adapted to show the use of CUPTI and PAPI in collecting event
* counters for multiple GPU contexts. PAPI Team (2015)
*/
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
 * application. On the other hand, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cupti.h>
#include <timer.h>
#include "papi_test.h"
#if not defined PAPI
#undef PAPI
#endif
#if not defined CUPTI_ONLY
#undef CUPTI_ONLY
#endif
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
// //////////////////////////////////////////////////////////////////////////////
// Data configuration
// //////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 48576 * 32;
#ifdef PAPI
const int MAX_NUM_EVENTS = 32;
#endif
#define CHECK_CU_ERROR(err, cufunc) \
if (err != CUDA_SUCCESS) { printf ("Error %d for CUDA Driver API function '%s'\n", err, cufunc); return -1; }
#define CHECK_CUDA_ERROR(err) \
if (err != cudaSuccess) { printf ("Error %d for CUDA \n", err ); return -1; }
#define CHECK_CUPTI_ERROR(err, cuptifunc) \
if (err != CUPTI_SUCCESS) { printf ("Error %d for CUPTI API function '%s'\n", err, cuptifunc); return -1; }
// //////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
// //////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel( float *d_Result, float *d_Input, int N )
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for( int pos = tid; pos < N; pos += threadN )
sum += d_Input[pos];
d_Result[tid] = sum;
}
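// Each of the gridDim.x*blockDim.x threads strides over its GPU's chunk and leaves one
// partial sum in d_Result; the final reduction over these ACCUM_N partials is done on the
// host after the asynchronous copy back.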
// //////////////////////////////////////////////////////////////////////////////
// Program main
// //////////////////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
// Solver config
TGPUplan plan[MAX_GPU_COUNT];
// GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf( "Starting simpleMultiGPU\n" );
// Report on the available CUDA devices
int computeCapabilityMajor = 0, computeCapabilityMinor = 0;
int runtimeVersion = 0, driverVersion = 0;
int deviceNum = -1;
char deviceName[32];
CUdevice dev;
CHECK_CUDA_ERROR( cudaGetDeviceCount( &GPU_N ) );
if( GPU_N > MAX_GPU_COUNT ) GPU_N = MAX_GPU_COUNT;
printf( "CUDA-capable device count: %i\n", GPU_N );
for ( deviceNum=0; deviceNum<GPU_N; deviceNum++ ) {
CHECK_CU_ERROR( cuDeviceGet( &dev, deviceNum ), "cuDeviceGet" );
CHECK_CU_ERROR( cuDeviceGetName( deviceName, 32, dev ), "cuDeviceGetName" );
CHECK_CU_ERROR( cuDeviceComputeCapability( &computeCapabilityMajor, &computeCapabilityMinor, dev ), "cuDeviceComputeCapability" );
cudaRuntimeGetVersion( &runtimeVersion );
cudaDriverGetVersion( &driverVersion );
printf( "CUDA Device %d: %s : computeCapability %d.%d runtimeVersion %d.%d driverVersion %d.%d\n", deviceNum, deviceName, computeCapabilityMajor, computeCapabilityMinor, runtimeVersion/1000, (runtimeVersion%100)/10, driverVersion/1000, (driverVersion%100)/10 );
if ( computeCapabilityMajor < 2 ) {
printf( "CUDA Device %d compute capability is too low... will not add any more GPUs\n", deviceNum );
GPU_N = deviceNum;
break;
}
}
uint32_t cupti_linked_version;
cuptiGetVersion( &cupti_linked_version );
printf("CUPTI version: Compiled against version %d; Linked against version %d\n", CUPTI_API_VERSION, cupti_linked_version );
printf( "Generating input data...\n" );
// Subdividing input data across GPUs
// Get data sizes for each GPU
for( i = 0; i < GPU_N; i++ )
plan[i].dataN = DATA_N / GPU_N;
// Take into account "odd" data sizes
for( i = 0; i < DATA_N % GPU_N; i++ )
plan[i].dataN++;
// Assign data ranges to GPUs
gpuBase = 0;
for( i = 0; i < GPU_N; i++ ) {
plan[i].h_Sum = h_SumGPU + i; // point within h_SumGPU array
gpuBase += plan[i].dataN;
}
// Create streams for issuing GPU commands asynchronously and allocate memory (GPU and System page-locked)
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
// cudaFree: forces creation of a context
CHECK_CUDA_ERROR( cudaFree( NULL ) );
CHECK_CUDA_ERROR( cudaStreamCreate( &plan[i].stream ) );
// Allocate memory
CHECK_CUDA_ERROR( cudaMalloc( ( void ** ) &plan[i].d_Data, plan[i].dataN * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMalloc( ( void ** ) &plan[i].d_Sum, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMallocHost( ( void ** ) &plan[i].h_Sum_from_device, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMallocHost( ( void ** ) &plan[i].h_Data, plan[i].dataN * sizeof( float ) ) );
for( j = 0; j < plan[i].dataN; j++ ) {
plan[i].h_Data[j] = ( float ) rand() / ( float ) RAND_MAX;
}
}
#ifdef CUPTI_ONLY
printf("Setup CUPTI counters internally for elapsed_cycles_sm event (CUPTI_ONLY)\n");
CUdevice device[MAX_GPU_COUNT];
CUcontext ctx[MAX_GPU_COUNT];
CUcontext ctxpopped[MAX_GPU_COUNT];
CUpti_EventGroup eg[MAX_GPU_COUNT];
CUpti_EventID myevent;//elapsed cycles
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR( cuDeviceGet( &device[i], i ), "cuDeviceGet" );
CHECK_CU_ERROR( cuCtxCreate( &ctx[i], 0, device[i] ), "cuCtxCreate" );
CHECK_CUPTI_ERROR( cuptiEventGroupCreate( ctx[i], &eg[i], 0 ), "cuptiEventGroupCreate" );
cuptiEventGetIdFromName ( device[i], "elapsed_cycles_sm", &myevent );
CHECK_CUPTI_ERROR( cuptiEventGroupAddEvent( eg[i], myevent ), "cuptiEventGroupAddEvent" );
CHECK_CUPTI_ERROR( cuptiEventGroupEnable( eg[i] ), "cuptiEventGroupEnable" );
CHECK_CU_ERROR( cuCtxPopCurrent( &ctxpopped[i] ), "cuCtxPopCurrent" );
}
#endif
#ifdef PAPI
printf("Setup PAPI counters internally (PAPI)\n");
int EventSet = PAPI_NULL;
int NUM_EVENTS = MAX_GPU_COUNT*MAX_NUM_EVENTS;
long long values[NUM_EVENTS];
int eventCount;
int retval, gg, ee;
/* PAPI Initialization */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if( retval != PAPI_VER_CURRENT ) fprintf( stderr, "PAPI_library_init failed\n" );
printf( "PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR( PAPI_VERSION ), PAPI_VERSION_MINOR( PAPI_VERSION ), PAPI_VERSION_REVISION( PAPI_VERSION ) );
retval = PAPI_create_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_create_eventset failed\n" );
// In this example measure 2 events from each GPU
int numEventEndings = 2;
static char *EventEndings[] = { (char*)"inst_executed", (char *)"elapsed_cycles_sm" };
// Add events at a GPU specific level ... eg cuda:::device:2:elapsed_cycles_sm
char *EventName[NUM_EVENTS];
char tmpEventName[50];
eventCount = 0;
for( gg = 0; gg < GPU_N; gg++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( gg ) ); // Set device
for ( ee=0; ee<numEventEndings; ee++ ) {
snprintf( tmpEventName, 50, "cuda:::device:%d:%s", gg, EventEndings[ee] );
printf( "Trying to add event %s to GPU %d in PAPI...", tmpEventName , gg );
retval = PAPI_add_named_event( EventSet, tmpEventName );
if (retval==PAPI_OK) {
printf( "Added event\n" );
EventName[eventCount] = (char *)calloc( 50, sizeof(char) );
snprintf( EventName[eventCount], 50, "%s", tmpEventName );
eventCount++;
} else {
printf( "Could not add event\n" );
}
}
}
// Start PAPI event measurement
retval = PAPI_start( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_start failed\n" );
#endif
// Start timing and compute on GPU(s)
printf( "Computing with %d GPUs...\n", GPU_N );
StartTimer();
// Copy data to GPU, launch the kernel and copy data back. All asynchronously
for( i = GPU_N-1; i >= 0; i-- ) {
// Set device
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
//AYK CHECK_CUPTI_ERROR( cuptiEventGroupResetAllEvents ( eg[i] ), "cuptiEventGroupResetAllEvents" );
// Copy input data from CPU
CHECK_CUDA_ERROR( cudaMemcpyAsync( plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof( float ), cudaMemcpyHostToDevice, plan[i].stream ) );
// Perform GPU computations
reduceKernel <<< BLOCK_N, THREAD_N, 0, plan[i].stream >>> ( plan[i].d_Sum, plan[i].d_Data, plan[i].dataN );
if ( cudaGetLastError() != cudaSuccess ) { printf( "reduceKernel() execution failed (GPU %d).\n", i ); exit(EXIT_FAILURE); }
// Read back GPU results
CHECK_CUDA_ERROR( cudaMemcpyAsync( plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N * sizeof( float ), cudaMemcpyDeviceToHost, plan[i].stream ) );
}
// Process GPU results
printf( "Process GPU results on %d GPUs...\n", GPU_N );
for( i = 0; i < GPU_N; i++ ) {
float sum;
// Set device
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
// Wait for all operations to finish
cudaStreamSynchronize( plan[i].stream );
// Finalize GPU reduction for current subvector
sum = 0;
for( j = 0; j < ACCUM_N; j++ ) {
sum += plan[i].h_Sum_from_device[j];
}
*( plan[i].h_Sum ) = ( float ) sum;
}
double gpuTime = GetTimer();
#ifdef CUPTI_ONLY
size_t size = 1024;
uint64_t buffer[1024];
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR( cuCtxSynchronize( ), "cuCtxSynchronize" );
CHECK_CUPTI_ERROR( cuptiEventGroupReadEvent ( eg[i], CUPTI_EVENT_READ_FLAG_NONE, myevent, &size, &buffer[i] ), "cuptiEventGroupReadEvent" );
printf( "CUPTI elapsed_cycles_sm device %d counterValue %u\n", i, buffer[i] );
}
#endif
#ifdef PAPI
retval = PAPI_stop( EventSet, values );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_stop failed\n" );
for( i = 0; i < eventCount; i++ )
printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
retval = PAPI_cleanup_eventset( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_cleanup_eventset failed\n" );
retval = PAPI_destroy_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_destroy_eventset failed\n" );
PAPI_shutdown();
#endif
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaFreeHost( plan[i].h_Sum_from_device ) );
CHECK_CUDA_ERROR( cudaFree( plan[i].d_Sum ) );
CHECK_CUDA_ERROR( cudaFree( plan[i].d_Data ) );
// Shut down this GPU
CHECK_CUDA_ERROR( cudaStreamDestroy( plan[i].stream ) );
}
sumGPU = 0;
for( i = 0; i < GPU_N; i++ ) {
sumGPU += h_SumGPU[i];
}
printf( " GPU Processing time: %f (ms)\n", gpuTime );
// Compute on Host CPU
printf( "Computing the same result with Host CPU...\n" );
StartTimer();
sumCPU = 0;
for( i = 0; i < GPU_N; i++ ) {
for( j = 0; j < plan[i].dataN; j++ ) {
sumCPU += plan[i].h_Data[j];
}
}
double cpuTime = GetTimer();
printf( " CPU Processing time: %f (ms)\n", cpuTime );
// Compare GPU and CPU results
printf( "Comparing GPU and Host CPU results...\n" );
diff = fabs( sumCPU - sumGPU ) / fabs( sumCPU );
printf( " GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU );
printf( " Relative difference: %E \n", diff );
// Cleanup and shutdown
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CUDA_ERROR( cudaFreeHost( plan[i].h_Data ) );
cudaDeviceReset();
}
exit( ( diff < 1e-5 ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
|
79a1e66502ed781a64c3b249e8e1db8ed060966b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <vector>
#include <numeric>
#include "Particles.h"
#include "CPU_PairCounts.h"
#include "GPU_PairCounts.h"
#include "gpu_utils.h"
using namespace std;
int main(int argc, char *argv[]) {
cout << "A simple test of the pair counting code" << endl;
// Do a simple check on the number of arguments
if (argc != 6) {
cout << "timing_shared_gpuonly Npart Nbins blockfac Nthreads niter\n";
exit(1);
}
// Get parameters
int Npart = atoi(argv[1]);
int Nbins = atoi(argv[2]);
int blockfac = atoi(argv[3]);
int Nthreads = atoi(argv[4]);
int niter = atoi(argv[5]);
cout << "Using N particles = " << Npart << endl;
cout << "Using N histogram bins = " << Nbins << endl;
// Determine CUDA properties
hipDeviceProp_t prop;
cuda_safe_call(hipGetDeviceProperties(&prop, 0));
int Nblocks = blockfac*prop.multiProcessorCount;
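  // Scale the launch grid with the number of multiprocessors so the amount of work tracks
  // the size of the device; blockfac selects how many blocks are requested per SM.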
cout << "Using Nblocks :" << Nblocks << endl;
cout << "Using Nthreads :" << Nthreads << endl;
// Initialize the particles
CPUParticles p1, p2;
{
CPUclock t1;
p1.mkRandom(Npart, 111);
p2.mkRandom(Npart, 129);
double dt = t1.elapsed();
cout << "Time to generate random particles (ms) ::" << dt << endl;
}
// Move particles to the GPU
GPUParticles g1, g2;
{
GPUclock t1;
moveParticles(p1, g1);
moveParticles(p2, g2);
float dt = t1.elapsed();
cout << "Time to move the particles to the GPU (ms) ::" << dt << endl;
}
// Cache the timing information
vector<float> timing(niter);
for (int ii=0; ii < niter; ++ii)
// Pair counting on the GPU
{
// Set up the histogram
RHist gpu_rr(Nbins, 0.0, 1.0/static_cast<double>(Nbins));
GPUclock t1;
GPU_PairCounts::sharedR(Nblocks, Nthreads, g1, g2, gpu_rr);
timing[ii] = t1.elapsed();
}
// A little wasteful
cout << "Minimum time :" << *min_element(timing.begin(), timing.end()) << endl;
cout << "Maximum time :" << *max_element(timing.begin(), timing.end()) << endl;
cout << "Average time :" << accumulate(timing.begin(), timing.end(), 0.0)/static_cast<float>(niter) << endl;
// End
exit(0);
}
|
79a1e66502ed781a64c3b249e8e1db8ed060966b.cu
|
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <vector>
#include <numeric>
#include "Particles.h"
#include "CPU_PairCounts.h"
#include "GPU_PairCounts.h"
#include "gpu_utils.h"
using namespace std;
int main(int argc, char *argv[]) {
cout << "A simple test of the pair counting code" << endl;
// Do a simple check on the number of arguments
if (argc != 6) {
cout << "timing_shared_gpuonly Npart Nbins blockfac Nthreads niter\n";
exit(1);
}
// Get parameters
int Npart = atoi(argv[1]);
int Nbins = atoi(argv[2]);
int blockfac = atoi(argv[3]);
int Nthreads = atoi(argv[4]);
int niter = atoi(argv[5]);
cout << "Using N particles = " << Npart << endl;
cout << "Using N histogram bins = " << Nbins << endl;
// Determine CUDA properties
cudaDeviceProp prop;
cuda_safe_call(cudaGetDeviceProperties(&prop, 0));
int Nblocks = blockfac*prop.multiProcessorCount;
cout << "Using Nblocks :" << Nblocks << endl;
cout << "Using Nthreads :" << Nthreads << endl;
// Initialize the particles
CPUParticles p1, p2;
{
CPUclock t1;
p1.mkRandom(Npart, 111);
p2.mkRandom(Npart, 129);
double dt = t1.elapsed();
cout << "Time to generate random particles (ms) ::" << dt << endl;
}
// Move particles to the GPU
GPUParticles g1, g2;
{
GPUclock t1;
moveParticles(p1, g1);
moveParticles(p2, g2);
float dt = t1.elapsed();
cout << "Time to move the particles to the GPU (ms) ::" << dt << endl;
}
// Cache the timing information
vector<float> timing(niter);
for (int ii=0; ii < niter; ++ii)
// Pair counting on the GPU
{
// Set up the histogram
RHist gpu_rr(Nbins, 0.0, 1.0/static_cast<double>(Nbins));
GPUclock t1;
GPU_PairCounts::sharedR(Nblocks, Nthreads, g1, g2, gpu_rr);
timing[ii] = t1.elapsed();
}
// A little wasteful
cout << "Minimum time :" << *min_element(timing.begin(), timing.end()) << endl;
cout << "Maximum time :" << *max_element(timing.begin(), timing.end()) << endl;
cout << "Average time :" << accumulate(timing.begin(), timing.end(), 0.0)/static_cast<float>(niter) << endl;
// End
exit(0);
}
|
591300033e442bf7097f8cccb21517be8f96e19e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated ds Tue Aug 13 16:45:16 2013
*/
#include "common_magma.h"
#define PRECISION_d
#define BLOCK_SIZE 32
//#define num_threads 64
#define dgemv_bs 32
#if (!defined(PRECISION_z)) || (GPUSHMEM >= 200)
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
static __device__ int flag = 0;
__global__ void
l_dlat2s_special(
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ double la[dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = blockIdx.x* dgemv_bs ;
double temp ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs; j+=4){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[j*lda];
__syncthreads();
A += dgemv_bs ;
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
__syncthreads();
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info[0] = flag+ la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
__global__ void
l_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ double la [dgemv_bs][dgemv_bs+1];
double temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For a sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leading thread computes the triangular parts;
the others wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp= A[count] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[count] = (float)(temp);
count++;
}
}
else{
}
//la[tx][ty] = flag ;
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//if( ty == 0 ) {
// info[ind] = ld[tx][0] + ld[tx][1] + ld[tx][2] + ld[tx][3] ;
//}
}
else{
/***************************************
-----------------------------------
-- All the blocks but the last one --
****************************************
-------------------------------------*/
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[ j * lda];
A+= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
__syncthreads();
//la[tx] [ty ] = flag ;
//__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//if( ty == 0 )
// {
// info [ind] = flag + la[tx][1]+ la[tx][2]+ la[tx][3] ;
// }
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ double la [dgemv_bs][dgemv_bs+1];
int blockIdxx = blockIdx.x ;
double temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For a sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leading thread computes the triangular parts;
the others wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++){
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp = A[-count] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-count] = (float)(temp);
count++;
}
}
else{
}
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info [ind] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
else{
/***************************************
-----------------------------------
-- All the blocks but the last one --
-- By the way this code can be optimized more.
****************************************
-------------------------------------*/
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx-1 )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
la[tx][31-ty-j] = A[ -j * lda];
}
A-= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
// temp = la[ty+j][tx] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = (float)(temp);
}
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//la[tx] [ty] = flag ;
//__syncthreads();
//
//if( ty == 0 ) {
// // info[ind] = flag + la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_dlat2s_special (
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
__shared__ double la [dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
double temp ;
int break_d = (n / dgemv_bs - blockIdx.x-1 )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
// la[tx][ty+j] = A[-j*lda] ;
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[tx][31-ty-j] = A[ -j * lda];
/*
Look at the indexing changes
*/
A-= dgemv_bs ;
__syncthreads();
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = (float)(temp);
}
//la[tx][ty] = flag ;
//
//__syncthreads();
//
//if( ty == 0 ) {
// // info[0] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
extern "C" void
mdlat2s(
char uplo, magma_int_t m,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
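// grid size: ceil(m / dgemv_bs) thread blocks, one per 32-row block of the matrix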
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_dlat2s_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_dlat2s_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_dlat2s_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_dlat2s_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from the dlansy routines.
Open question: how to deliver the overflow info back to the caller.
*/
extern "C" void
magmablas_dlat2s(
char uplo, magma_int_t n,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a DOUBLE PRECISION triangular
matrix A to SINGLE PRECISION triangular matrix SA.
*/
*info = 0;
mdlat2s( uplo, n, A, lda, SA, ldsa, info );
}
///////////////////////////////////////////////////////////////////////////////////////////
#else
///////////////////////////////////////////////////////////////////////////////////////////
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
__global__ void
l_dlat2s_special (
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = (blockIdx.x+1)* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = (float)(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
__global__ void
l_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = (float)(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leader thread computes the triangular part;
the other threads wait, holding their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++)
SA[j*ldsa] = (float)(A[j*lda]);
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[count] = (float)(A[count]);
count++;
}
}
else{
}
__syncthreads();
}
else{
/* **************************************
-- All the blocks but the last one --
************************************** */
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = (blockIdx.x+1)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = (float)(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
int blockIdxx = blockIdx.x ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = (float)(A[-j*lda]);
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leader thread computes the triangular part;
the other threads wait, holding their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++)
SA[-j*ldsa] = (float)(A[-j*lda]);
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[-count] = (float)(A[-count]);
count++;
}
}
else{
}
}
else{
/* **************************************
-- All the blocks but the last one --
-- By the way this code can be optimized more.
************************************** */
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = (float)(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_dlat2s_special (
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdx.x )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = (float)(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
extern "C" void
mdlat2s(
char uplo, magma_int_t m,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_dlat2s_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_dlat2s_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_dlat2s_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_dlat2s_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from the dlansy routines.
Open question: how to deliver the overflow info back to the caller.
*/
extern "C" void
magmablas_dlat2s(
char uplo, magma_int_t n,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a DOUBLE PRECISION triangular
matrix A (along with its block diagonal entries) to a SINGLE PRECISION
triangular matrix SA (along with its block diagonal entries).
*/
*info = 0;
mdlat2s( uplo, n, A, lda, SA, ldsa, info );
/*
int val = hipblasIdamax(n, WORK, 1);
double retVal[1];
hipblasGetMatrix( 1, 1, sizeof( double ), WORK+val-1, 1, retVal, 1 ) ;
return retVal[0];
*/
}
#endif /* (!defined(PRECISION_z)) || (GPUSHMEM >= 200) */
|
591300033e442bf7097f8cccb21517be8f96e19e.cu
|
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated ds Tue Aug 13 16:45:16 2013
*/
#include "common_magma.h"
#define PRECISION_d
#define BLOCK_SIZE 32
//#define num_threads 64
#define dgemv_bs 32
#if (!defined(PRECISION_z)) || (GPUSHMEM >= 200)
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
static __device__ int flag = 0;
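/*
   flag is a device-wide overflow indicator: the conversion kernels below set it
   to 1 whenever an element of A falls outside [-RMAX, RMAX] and therefore cannot
   be represented in single precision. It is currently never propagated back into
   *info (the wrapper just sets *info = 0); the per-block reductions that would do
   so are commented out.
*/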
__global__ void
l_dlat2s_special(
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ double la[dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = blockIdx.x* dgemv_bs ;
double temp ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs; j+=4){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[j*lda];
__syncthreads();
A += dgemv_bs ;
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
__syncthreads();
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info[0] = flag+ la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
__global__ void
l_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ double la [dgemv_bs][dgemv_bs+1];
double temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leader thread computes the triangular part;
the other threads wait, holding their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp= A[count] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[count] = (float)(temp);
count++;
}
}
else{
}
//la[tx][ty] = flag ;
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//if( ty == 0 ) {
// info[ind] = ld[tx][0] + ld[tx][1] + ld[tx][2] + ld[tx][3] ;
//}
}
else{
/* **************************************
-- All the blocks but the last one --
************************************** */
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[ j * lda];
A+= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)(temp);
}
__syncthreads();
//la[tx] [ty ] = flag ;
//__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//if( ty == 0 )
// {
// info [ind] = flag + la[tx][1]+ la[tx][2]+ la[tx][3] ;
// }
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ double la [dgemv_bs][dgemv_bs+1];
int blockIdxx = blockIdx.x ;
double temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leader thread computes the triangular part;
the other threads wait, holding their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++){
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp = A[-count] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-count] = (float)(temp);
count++;
}
}
else{
}
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info [ind] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
else{
/* **************************************
-- All the blocks but the last one --
-- By the way this code can be optimized more.
************************************** */
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx-1 )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
la[tx][31-ty-j] = A[ -j * lda];
}
A-= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
// temp = la[ty+j][tx] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = (float)(temp);
}
/*--------------------------------------------------------
The leader accumulates all the results from its peers.
----------------------------------------------------------*/
//la[tx] [ty] = flag ;
//__syncthreads();
//
//if( ty == 0 ) {
// // info[ind] = flag + la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_dlat2s_special (
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
__shared__ double la [dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
double temp ;
int break_d = (n / dgemv_bs - blockIdx.x-1 )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
// la[tx][ty+j] = A[-j*lda] ;
temp = A[-j*lda] ;
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = (float)(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[tx][31-ty-j] = A[ -j * lda];
/*
Look at the indexing changes
*/
A-= dgemv_bs ;
__syncthreads();
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
if( ((temp) < mRMAX) || ((temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((temp) < mRMAX) || ((temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = (float)(temp);
}
//la[tx][ty] = flag ;
//
//__syncthreads();
//
//if( ty == 0 ) {
// // info[0] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
extern "C" void
mdlat2s(
char uplo, magma_int_t m,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
l_dlat2s_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
else{
u_dlat2s_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
l_dlat2s_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
u_dlat2s_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from the dlansy routines.
Open question: how to deliver the overflow info back to the caller.
*/
extern "C" void
magmablas_dlat2s(
char uplo, magma_int_t n,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a DOUBLE PRECISION triangular
matrix A to SINGLE PRECISION triangular matrix SA.
*/
*info = 0;
mdlat2s( uplo, n, A, lda, SA, ldsa, info );
}
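/*
   Illustrative usage sketch (editor addition, not part of the original MAGMA
   source): it shows how magmablas_dlat2s might be called to down-convert the
   lower triangle of an n x n matrix that already resides on the GPU. The buffer
   names dA/dSA, the leading dimensions and the plain cudaMalloc allocation are
   assumptions made only for this example.
*/
#if 0
static void example_dlat2s_usage(magma_int_t n)
{
double *dA;
float *dSA;
magma_int_t info = 0;
cudaMalloc((void**)&dA, (size_t)n * n * sizeof(double));
cudaMalloc((void**)&dSA, (size_t)n * n * sizeof(float));
/* ... fill dA, e.g. with cublasSetMatrix or cudaMemcpy ... */
magmablas_dlat2s('L', n, dA, n, dSA, n, &info);
/* info is always 0 here: the overflow flag computed in the kernels is not
   propagated back (see the commented-out reductions above). */
cudaFree(dA);
cudaFree(dSA);
}
#endif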
///////////////////////////////////////////////////////////////////////////////////////////
#else
///////////////////////////////////////////////////////////////////////////////////////////
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
__global__ void
l_dlat2s_special (
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = (blockIdx.x+1)* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = (float)(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
__global__ void
l_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = (float)(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leader thread computes the triangular part;
the other threads wait, holding their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++)
SA[j*ldsa] = (float)(A[j*lda]);
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[count] = (float)(A[count]);
count++;
}
}
else{
}
__syncthreads();
}
else{
/* **************************************
-- All the blocks but the last one --
************************************** */
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = (blockIdx.x+1)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = (float)(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_dlat2s_generic(
int n,
const double *A, int lda,
float *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
int blockIdxx = blockIdx.x ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = (float)(A[-j*lda]);
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the leader thread computes the triangular part;
the other threads wait, holding their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++)
SA[-j*ldsa] = (float)(A[-j*lda]);
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[-count] = (float)(A[-count]);
count++;
}
}
else{
}
}
else{
/* **************************************
-- All the blocks but the last one --
-- By the way this code can be optimized more.
************************************** */
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = (float)(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_dlat2s_special (
int n,
const double *A, int lda,
float *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdx.x )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = (float)(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
extern "C" void
mdlat2s(
char uplo, magma_int_t m,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
l_dlat2s_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
else{
u_dlat2s_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
l_dlat2s_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
u_dlat2s_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from the dlansy routines.
Open question: how to deliver the overflow info back to the caller.
*/
extern "C" void
magmablas_dlat2s(
char uplo, magma_int_t n,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a DOUBLE PRECISION triangular
matrix A (along with its block diagonal entries) to a SINGLE PRECISION
triangular matrix SA (along with its block diagonal entries).
*/
*info = 0;
mdlat2s( uplo, n, A, lda, SA, ldsa, info );
/*
int val = cublasIdamax(n, WORK, 1);
double retVal[1];
cublasGetMatrix( 1, 1, sizeof( double ), WORK+val-1, 1, retVal, 1 ) ;
return retVal[0];
*/
}
#endif /* (!defined(PRECISION_z)) || (GPUSHMEM >= 200) */
|
fd9da38a5b56f4437c105997cbb3a41ab15e3c3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<cuda_runtime.h>
#include <stdio.h>
#define CONFIG_MASK_SIZE 32
#define CONFIG_IMAGE_WIDTH 640
#define CONFIG_IMAGE_HEIGHT 480
#define CONFIG_IMAGE_CHANNEL 3
#define CONFIG_COMPUTE_COUNT 10000
template <int BLOCK_SIZE, int MASK_SIZE, int CHANNELS>
__global__ void WatermarkKernel(
const unsigned char *srcImage, //HxWxC
const unsigned char *mask, // CONFIG_MASK_SIZE x CONFIG_MASK_SIZE
unsigned char *dstImage){
__shared__ float localMem[MASK_SIZE][MASK_SIZE][CHANNELS];
unsigned long index, maskIndex;
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
//Global index
int gx = bx*BLOCK_SIZE+tx;
int gy = by*BLOCK_SIZE+ty;
index = gy*CONFIG_IMAGE_WIDTH*CONFIG_IMAGE_CHANNEL + gx*CONFIG_IMAGE_CHANNEL;
for(int iter=0; iter<CONFIG_COMPUTE_COUNT; iter++) {
for (int c = 0; c < 3; c++) {
maskIndex = ty * MASK_SIZE + tx;
localMem[ty][tx][c] = (unsigned char) (srcImage[index + c] * (float) mask[maskIndex] / 256.0f);
}
}
__syncthreads();
// stage the transposed texel in registers so that no thread reads a shared-memory
// element while its partner thread is overwriting it
float transposed[CHANNELS];
for(int c=0; c<3; c++) transposed[c] = localMem[tx][ty][c];
__syncthreads();
for(int c=0; c<3; c++) localMem[ty][tx][c] = transposed[c];
__syncthreads();
for(int c=0; c<3; c++){
dstImage[index+c] = (unsigned char) (localMem[ty][tx][c]);
}
}
void LaunchKernel_WatermarkKernel(
const unsigned char *srcImage,
const unsigned char *mask,
unsigned char *dstImage){
dim3 block(CONFIG_MASK_SIZE,CONFIG_MASK_SIZE,1);
dim3 grid(
CONFIG_IMAGE_WIDTH/CONFIG_MASK_SIZE,
CONFIG_IMAGE_HEIGHT/CONFIG_MASK_SIZE,
1);
//printf("BLOCK = (%d,%d,%d)\n", block.x, block.y, block.z);
//printf("GRID = (%d,%d,%d)\n", grid.x, grid.y, grid.z);
hipLaunchKernelGGL(( WatermarkKernel<CONFIG_MASK_SIZE, CONFIG_MASK_SIZE, CONFIG_IMAGE_CHANNEL>) , dim3(grid), dim3(block), 0, 0,
srcImage,
mask,
dstImage);
}
|
fd9da38a5b56f4437c105997cbb3a41ab15e3c3f.cu
|
#include<cuda.h>
#include<cuda_runtime.h>
#include <stdio.h>
#define CONFIG_MASK_SIZE 32
#define CONFIG_IMAGE_WIDTH 640
#define CONFIG_IMAGE_HEIGHT 480
#define CONFIG_IMAGE_CHANNEL 3
#define CONFIG_COMPUTE_COUNT 10000
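// NOTE: the kernel below repeats the same per-pixel computation CONFIG_COMPUTE_COUNT
// times; the extra iterations only add artificial load (e.g. for timing experiments),
// the final result is the same as for a single pass.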
template <int BLOCK_SIZE, int MASK_SIZE, int CHANNELS>
__global__ void WatermarkKernel(
const unsigned char *srcImage, //HxWxC
const unsigned char *mask, // CONFIG_MASK_SIZE x CONFIG_MASK_SIZE
unsigned char *dstImage){
__shared__ float localMem[MASK_SIZE][MASK_SIZE][CHANNELS];
unsigned long index, maskIndex;
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
//Global index
int gx = bx*BLOCK_SIZE+tx;
int gy = by*BLOCK_SIZE+ty;
index = gy*CONFIG_IMAGE_WIDTH*CONFIG_IMAGE_CHANNEL + gx*CONFIG_IMAGE_CHANNEL;
for(int iter=0; iter<CONFIG_COMPUTE_COUNT; iter++) {
for (int c = 0; c < 3; c++) {
maskIndex = ty * MASK_SIZE + tx;
localMem[ty][tx][c] = (unsigned char) (srcImage[index + c] * (float) mask[maskIndex] / 256.0f);
}
}
__syncthreads();
// stage the transposed texel in registers so that no thread reads a shared-memory
// element while its partner thread is overwriting it
float transposed[CHANNELS];
for(int c=0; c<3; c++) transposed[c] = localMem[tx][ty][c];
__syncthreads();
for(int c=0; c<3; c++) localMem[ty][tx][c] = transposed[c];
__syncthreads();
for(int c=0; c<3; c++){
dstImage[index+c] = (unsigned char) (localMem[ty][tx][c]);
}
}
void LaunchKernel_WatermarkKernel(
const unsigned char *srcImage,
const unsigned char *mask,
unsigned char *dstImage){
dim3 block(CONFIG_MASK_SIZE,CONFIG_MASK_SIZE,1);
dim3 grid(
CONFIG_IMAGE_WIDTH/CONFIG_MASK_SIZE,
CONFIG_IMAGE_HEIGHT/CONFIG_MASK_SIZE,
1);
//printf("BLOCK = (%d,%d,%d)\n", block.x, block.y, block.z);
//printf("GRID = (%d,%d,%d)\n", grid.x, grid.y, grid.z);
WatermarkKernel<CONFIG_MASK_SIZE, CONFIG_MASK_SIZE, CONFIG_IMAGE_CHANNEL> <<<grid, block>>>(
srcImage,
mask,
dstImage);
}
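/*
   Minimal host-side usage sketch (editor addition, not part of the original file):
   it allocates device buffers, uploads a host image and mask, runs the watermark
   kernel and copies the result back. The host pointers h_image, h_mask and h_out
   are assumptions for illustration; error checking is omitted for brevity.
*/
#if 0
static void ExampleWatermarkUsage(const unsigned char *h_image,
const unsigned char *h_mask,
unsigned char *h_out){
const size_t imgBytes = (size_t)CONFIG_IMAGE_WIDTH * CONFIG_IMAGE_HEIGHT * CONFIG_IMAGE_CHANNEL;
const size_t maskBytes = (size_t)CONFIG_MASK_SIZE * CONFIG_MASK_SIZE;
unsigned char *d_src, *d_mask, *d_dst;
cudaMalloc((void**)&d_src, imgBytes);
cudaMalloc((void**)&d_mask, maskBytes);
cudaMalloc((void**)&d_dst, imgBytes);
cudaMemcpy(d_src, h_image, imgBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_mask, h_mask, maskBytes, cudaMemcpyHostToDevice);
LaunchKernel_WatermarkKernel(d_src, d_mask, d_dst);
cudaDeviceSynchronize();
cudaMemcpy(h_out, d_dst, imgBytes, cudaMemcpyDeviceToHost);
cudaFree(d_src);
cudaFree(d_mask);
cudaFree(d_dst);
}
#endif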
|
418357a7a8631a5e46589e851f340cdf57151cf1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float* var_29,float* var_30,float* var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39) {
if (comp > +1.5965E-6f * +0.0f - sinf(acosf(sinf((var_3 * var_4 / (+0.0f - -1.7945E-17f)))))) {
float tmp_1 = var_5 * -1.5286E-43f / var_6;
comp = tmp_1 - var_7 - fmodf(-1.6283E-35f, acosf((+0.0f / (var_8 * (+1.5672E35f / var_9)))));
for (int i=0; i < var_1; ++i) {
float tmp_2 = -1.0614E36f;
comp = tmp_2 - var_10 / (+0.0f - -0.0f * (var_11 + var_12));
}
if (comp == +1.2665E4f - (var_13 - (-0.0f * sqrtf(var_14 / var_15 - expf(var_16 * +1.5636E-42f / -0.0f - var_17))))) {
comp += -0.0f * (-1.0037E-35f + -0.0f / (var_18 - var_19));
comp = powf((-0.0f - -1.0754E-42f), fmodf((-1.0412E34f - var_20 + atan2f((-1.4733E-21f * +1.9789E36f + -1.8577E-27f / (var_21 + var_22 / var_23)), var_24 * ldexpf(asinf((var_25 + var_26)), 2))), (-1.4438E12f - (var_27 + var_28 - +0.0f))));
comp += asinf(-1.4041E-14f);
}
for (int i=0; i < var_2; ++i) {
var_29[i] = +1.6733E-28f / (+1.5325E36f - +1.4437E34f / atanf(+1.4076E17f / (-1.5764E-41f / ceilf((+1.9181E-43f * var_32 + (var_33 * +0.0f))))));
var_30[i] = atan2f(-1.9927E-42f, var_34 + tanhf(var_35 * (var_36 / +1.2159E-12f / +1.5273E-37f)));
var_31[i] = -0.0f;
comp += var_31[i] + var_30[i] * var_29[i] - var_37 * var_38 / var_39;
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float* tmp_30 = initPointer( atof(argv[30]) );
float* tmp_31 = initPointer( atof(argv[31]) );
float* tmp_32 = initPointer( atof(argv[32]) );
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40);
hipDeviceSynchronize();
return 0;
}
|
418357a7a8631a5e46589e851f340cdf57151cf1.cu
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float* var_29,float* var_30,float* var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39) {
if (comp > +1.5965E-6f * +0.0f - sinf(acosf(sinf((var_3 * var_4 / (+0.0f - -1.7945E-17f)))))) {
float tmp_1 = var_5 * -1.5286E-43f / var_6;
comp = tmp_1 - var_7 - fmodf(-1.6283E-35f, acosf((+0.0f / (var_8 * (+1.5672E35f / var_9)))));
for (int i=0; i < var_1; ++i) {
float tmp_2 = -1.0614E36f;
comp = tmp_2 - var_10 / (+0.0f - -0.0f * (var_11 + var_12));
}
if (comp == +1.2665E4f - (var_13 - (-0.0f * sqrtf(var_14 / var_15 - expf(var_16 * +1.5636E-42f / -0.0f - var_17))))) {
comp += -0.0f * (-1.0037E-35f + -0.0f / (var_18 - var_19));
comp = powf((-0.0f - -1.0754E-42f), fmodf((-1.0412E34f - var_20 + atan2f((-1.4733E-21f * +1.9789E36f + -1.8577E-27f / (var_21 + var_22 / var_23)), var_24 * ldexpf(asinf((var_25 + var_26)), 2))), (-1.4438E12f - (var_27 + var_28 - +0.0f))));
comp += asinf(-1.4041E-14f);
}
for (int i=0; i < var_2; ++i) {
var_29[i] = +1.6733E-28f / (+1.5325E36f - +1.4437E34f / atanf(+1.4076E17f / (-1.5764E-41f / ceilf((+1.9181E-43f * var_32 + (var_33 * +0.0f))))));
var_30[i] = atan2f(-1.9927E-42f, var_34 + tanhf(var_35 * (var_36 / +1.2159E-12f / +1.5273E-37f)));
var_31[i] = -0.0f;
comp += var_31[i] + var_30[i] * var_29[i] - var_37 * var_38 / var_39;
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float* tmp_30 = initPointer( atof(argv[30]) );
float* tmp_31 = initPointer( atof(argv[31]) );
float* tmp_32 = initPointer( atof(argv[32]) );
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40);
cudaDeviceSynchronize();
return 0;
}
|
7faef785829c29bb1d74f161bc2244eda41e70ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// std includes
#include <iostream>
#include <time.h>
#include <string>
#include <fstream>
#include <sstream>
// local includes
#include "Color.hpp"
#include "Ray.hpp"
#include "Intersection.hpp"
#include "Camera.hpp"
#include "Comps.hpp"
#include "World.cuh"
#include "CUDAVector.cuh"
// defines
#define DEBUG false
#define RENDER true
#define PRINTIMG false
#define SAVING true
#define RUN true
#define MAX_DEPTH 3
__global__ void renderRefraction(CUDAVector<Image_buffer>& image_buffer, int max_x, int max_y, World& world) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
// only render if it has been set
if (image_buffer[pixel_index].renderReflection()) {
Ray ray(image_buffer[pixel_index].reflection.origin, image_buffer[pixel_index].reflection.direction);
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps(world.prepare_computations(intersection, ray));
world.color_at3(comps, image_buffer[pixel_index]);
}
else {
image_buffer[pixel_index].setRefractFalse();
}
}
// only render if it has been set
if (image_buffer[pixel_index].renderRefraction()) {
Ray ray(image_buffer[pixel_index].refraction.origin, image_buffer[pixel_index].refraction.direction);
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps(world.prepare_computations(intersection, ray));
world.color_at3(comps, image_buffer[pixel_index]);
}
else {
image_buffer[pixel_index].setRefractFalse();
}
}
}
// maybe this also works with device vectors?
__global__ void renderWorld2(CUDAVector<Image_buffer>& image_buffer, int max_x, int max_y, World& world) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
// Camera Ray
float X = 2.0f * float(i) / float(max_x) - 1;
float Y = -2.0f * float(j) / float(max_y) + 1;
Ray ray(world.camera->getRay(X, Y));
// checking for Intersections
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps2(world.prepare_computations(intersection, ray));
world.color_at3(comps2, image_buffer[pixel_index]);
}
}
void CUDAmain_run2(const std::string& filename) {
World* world;
checkCudaErrors(hipMallocManaged((void**)&world, sizeof(World)));
world->createWorld(filename);
while (true) {
// threads & blocks
int tx = 8;
int ty = 8;
// image params from World
int nx = world->getWidth();
int ny = world->getHeight();
int num_pixels = nx * ny;
CUDAVector<Image_buffer> image_buffer;
image_buffer.reserve(num_pixels);
// launch geometry for the render kernels
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
// render image
if (RENDER) {
// render initial state
renderWorld2 << <blocks, threads >> > (image_buffer, nx, ny, *world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// now render refractions
for (int numRefractions = 0; numRefractions < MAX_DEPTH; numRefractions++) {
renderRefraction << <blocks, threads >> > (image_buffer, nx, ny, *world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
}
}
// save image
if (SAVING) {
std::ofstream out("out.ppm");
out << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
int pixel_index = j * nx + i;
int ir = int(255.99 * image_buffer[pixel_index].color.r);
int ig = int(255.99 * image_buffer[pixel_index].color.g);
int ib = int(255.99 * image_buffer[pixel_index].color.b);
out << ir << " " << ig << " " << ib << "\n";
}
}
}
// what next?
char input[10];
std::cerr << "What action now?\n exit: programm stops\ncamera: load new camera\nworld: reload world\n";
std::cin >> input;
if (strcmp(input, "camera") == 0) {
std::ifstream file_("camera.txt");
if (!file_.is_open()) {
printf("Camera file not found!");
break;
}
std::string line_;
while (getline(file_, line_)) {
if (line_[0] == '#') continue;
if (line_.empty()) continue;
std::stringstream input(line_);
std::string paramName;
input >> paramName;
if (paramName == "Camera:") {
float px, py, pz, fx, fy, fz, ux, uy, uz, fov;
input >> px >> py >> pz >> fx >> fy >> fz >> ux >> uy >> uz >> fov;
Vec4 origin(px, py, pz, 1.0f);
Vec4 forward(fx, fy, fz);
Vec4 upguide(ux, uy, uz);
fov = fov * PI / 180.f;
world->camera->createCamera(origin, forward, upguide, fov, aspectRatio);
std::cerr << "Adjusted Camera\n";
break;
}
}
}
}
}
/*
RENDER FUNCTION
*/
__global__ void renderWorld(Color* fb_color, int max_x, int max_y, World& world) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
// Camera Ray
float X = 2.0f * float(i) / float(max_x) - 1;
float Y = -2.0f * float(j) / float(max_y) + 1;
Ray ray(world.camera->getRay(X, Y));
//if (pixel_index != 0) return;
// checking for Intersections
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps2;
comps2 = world.prepare_computations(intersection, ray);
// Color to change
Color color(0);
world.color_at2(comps2, color);
fb_color[pixel_index] = color;
}
else {
fb_color[pixel_index] = Color(0);
}
}
/*
MAIN FUNCTION -> 1st argument is supposed to be Input Scene
*/
void CUDAmain(const std::string& filename) {
clock_t start_w, stop_w;
start_w = clock();
World* world;
checkCudaErrors(hipMallocManaged((void**)&world, sizeof(World)));
world->createWorld(filename);
stop_w = clock();
double timer_world = ((double)(stop_w - start_w)) / CLOCKS_PER_SEC;
std::cerr << "Loading World in " << timer_world << " seconds.\n";
world->print();
Triangle t1 = world->triangle_vector[0];
Triangle t2 = world->triangle_vector[1];
// image params from World
int nx = world->getWidth();
int ny = world->getHeight();
// threads & blocks
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx * ny;
size_t fb_color_size = num_pixels * sizeof(Color);
// allocate FB with color
Color* fb_color;
checkCudaErrors(hipMallocManaged((void**)&fb_color, fb_color_size));
// utility
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
// render image
if (RENDER) renderWorld << <blocks, threads >> > (fb_color, nx, ny, *world);
//if (RENDER) renderWorld << <blocks, threads >> > (fb_color, nx, ny, world);
// sync
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
//__host__ void exportImg(std::string && filename)
if (SAVING) {
std::ofstream out("out.ppm");
out << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb_color[pixel_index].r);
int ig = int(255.99 * fb_color[pixel_index].g);
int ib = int(255.99 * fb_color[pixel_index].b);
out << ir << " " << ig << " " << ib << "\n";
}
}
}
// Output FB_Color as Image, change later to report std::cout
if (PRINTIMG) {
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb_color[pixel_index].r);
int ig = int(255.99 * fb_color[pixel_index].g);
int ib = int(255.99 * fb_color[pixel_index].b);
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
}
// Free allocated Memory
checkCudaErrors(hipFree(fb_color));
checkCudaErrors(hipFree(world));
}
void CUDAmain_run(const std::string& filename) {
// init world
clock_t start_w, stop_w;
start_w = clock();
World* world;
checkCudaErrors(hipMallocManaged((void**)&world, sizeof(World)));
world->createWorld(filename);
stop_w = clock();
double timer_world = ((double)(stop_w - start_w)) / CLOCKS_PER_SEC;
std::cerr << "Loading World in " << timer_world << " seconds.\n";
world->print();
// allocate FB with color
Color* fb_color;
while (true) {
// image params from World
int nx = world->getWidth();
int ny = world->getHeight();
// threads & blocks
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx * ny;
size_t fb_color_size = num_pixels * sizeof(Color);
checkCudaErrors(hipMallocManaged((void**)&fb_color, fb_color_size));
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
// render image
if (RENDER) renderWorld << <blocks, threads >> > (fb_color, nx, ny, *world);
// sync
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
if (SAVING) {
std::ofstream out("out.ppm");
out << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb_color[pixel_index].r);
int ig = int(255.99 * fb_color[pixel_index].g);
int ib = int(255.99 * fb_color[pixel_index].b);
out << ir << " " << ig << " " << ib << "\n";
}
}
}
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
checkCudaErrors(hipFree(fb_color));
// what next?
char input[10];
std::cerr << "What action now?\n exit: programm stops\ncamera: load new camera\nworld: reload world\n";
std::cin >> input;
if (strcmp(input, "exit") == 0) {
break;
}
if (strcmp(input, "camera") == 0) {
std::ifstream file_("camera.txt");
if (!file_.is_open()) {
printf("Camera file not found!");
break;
}
std::string line_;
while (getline(file_, line_)) {
if (line_[0] == '#') continue;
if (line_.empty()) continue;
std::stringstream input(line_);
std::string paramName;
input >> paramName;
if (paramName == "Camera:") {
float px, py, pz, fx, fy, fz, ux, uy, uz, fov;
input >> px >> py >> pz >> fx >> fy >> fz >> ux >> uy >> uz >> fov;
Vec4 origin(px, py, pz, 1.0f);
Vec4 forward(fx, fy, fz);
Vec4 upguide(ux, uy, uz);
fov = fov * PI / 180.f;
world->camera->createCamera(origin, forward, upguide, fov, aspectRatio);
std::cerr << "Adjusted Camera\n";
break;
}
}
}
}
// Free allocated Memory
checkCudaErrors(hipFree(fb_color));
checkCudaErrors(hipFree(world));
}
int main(int argc, char* argv[]) {
if (RUN) {
std::string filename = "Scene.txt";
CUDA_run_reflection(filename);
return 0;
}
if (DEBUG) {
std::string texturepath = "textures/sls_interior.tga";
Texture t(texturepath,0);
std::string filename = "textures/copy.tga";
t.exportImg("textures/copy.tga");
return 0;
}
if (RUN) {
std::string filename = "Scene.txt";
CUDAmain_run(filename);
return 0;
}
if (false) {
if (argc == 1) {
printf("No Input File was specified!\n");
return 0;
}
std::string filename = argv[1];
CUDAmain(filename);
}
else {
std::string filename = "Scene.txt";
CUDAmain(filename);
}
return 0;
}
|
7faef785829c29bb1d74f161bc2244eda41e70ef.cu
|
// std includes
#include <iostream>
#include <time.h>
#include <string>
#include <fstream>
#include <sstream>
// local includes
#include "Color.hpp"
#include "Ray.hpp"
#include "Intersection.hpp"
#include "Camera.hpp"
#include "Comps.hpp"
#include "World.cuh"
#include "CUDAVector.cuh"
// defines
#define DEBUG false
#define RENDER true
#define PRINTIMG false
#define SAVING true
#define RUN true
#define MAX_DEPTH 3
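// Rendering scheme (see CUDAmain_run2 below): renderWorld2 traces the primary camera
// rays once; renderRefraction is then launched MAX_DEPTH more times, each pass tracing
// the reflection/refraction rays that the previous pass left in the per-pixel
// Image_buffer entries.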
__global__ void renderRefraction(CUDAVector<Image_buffer>& image_buffer, int max_x, int max_y, World& world) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
// only render if it has been set
if (image_buffer[pixel_index].renderReflection()) {
Ray ray(image_buffer[pixel_index].reflection.origin, image_buffer[pixel_index].reflection.direction);
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps(world.prepare_computations(intersection, ray));
world.color_at3(comps, image_buffer[pixel_index]);
}
else {
image_buffer[pixel_index].setRefractFalse();
}
}
// only render if it has been set
if (image_buffer[pixel_index].renderRefraction()) {
Ray ray(image_buffer[pixel_index].refraction.origin, image_buffer[pixel_index].refraction.direction);
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps(world.prepare_computations(intersection, ray));
world.color_at3(comps, image_buffer[pixel_index]);
}
else {
image_buffer[pixel_index].setRefractFalse();
}
}
}
// maybe this also works with device vectors?
__global__ void renderWorld2(CUDAVector<Image_buffer>& image_buffer, int max_x, int max_y, World& world) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
// Camera Ray
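// map pixel (i, j) to normalized device coordinates in [-1, 1]; Y is negated because
// image rows grow downward while camera-space Y points up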
float X = 2.0f * float(i) / float(max_x) - 1;
float Y = -2.0f * float(j) / float(max_y) + 1;
Ray ray(world.camera->getRay(X, Y));
// checking for Intersections
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps2(world.prepare_computations(intersection, ray));
world.color_at3(comps2, image_buffer[pixel_index]);
}
}
void CUDAmain_run2(const std::string& filename) {
World* world;
checkCudaErrors(cudaMallocManaged((void**)&world, sizeof(World)));
world->createWorld(filename);
while (true) {
// threads & blocks
int tx = 8;
int ty = 8;
// image params from World
int nx = world->getWidth();
int ny = world->getHeight();
int num_pixels = nx * ny;
CUDAVector<Image_buffer> image_buffer;
image_buffer.reserve(num_pixels);
// launch geometry for the render kernels
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
// render image
if (RENDER) {
// render initial state
renderWorld2 << <blocks, threads >> > (image_buffer, nx, ny, *world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// now render refractions
for (int numRefractions = 0; numRefractions < MAX_DEPTH; numRefractions++) {
renderRefraction << <blocks, threads >> > (image_buffer, nx, ny, *world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
}
}
// save image
if (SAVING) {
std::ofstream out("out.ppm");
out << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
int pixel_index = j * nx + i;
int ir = int(255.99 * image_buffer[pixel_index].color.r);
int ig = int(255.99 * image_buffer[pixel_index].color.g);
int ib = int(255.99 * image_buffer[pixel_index].color.b);
out << ir << " " << ig << " " << ib << "\n";
}
}
}
// what next?
char input[10];
std::cerr << "What action now?\n exit: programm stops\ncamera: load new camera\nworld: reload world\n";
std::cin >> input;
if (strcmp(input, "camera") == 0) {
std::ifstream file_("camera.txt");
if (!file_.is_open()) {
printf("Camera file not found!");
break;
}
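// camera.txt format (example values are illustrative): '#' lines and blank lines are
// skipped; a "Camera:" line carries position, forward, up-guide and fov in degrees, e.g.
// Camera: 0.0 1.0 -5.0 0.0 0.0 1.0 0.0 1.0 0.0 60.0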
std::string line_;
while (getline(file_, line_)) {
if (line_[0] == '#') continue;
if (line_.empty()) continue;
std::stringstream input(line_);
std::string paramName;
input >> paramName;
if (paramName == "Camera:") {
float px, py, pz, fx, fy, fz, ux, uy, uz, fov;
input >> px >> py >> pz >> fx >> fy >> fz >> ux >> uy >> uz >> fov;
Vec4 origin(px, py, pz, 1.0f);
Vec4 forward(fx, fy, fz);
Vec4 upguide(ux, uy, uz);
fov = fov * PI / 180.f;
world->camera->createCamera(origin, forward, upguide, fov, aspectRatio);
std::cerr << "Adjusted Camera\n";
break;
}
}
}
}
}
/*
RENDER FUNCTION
*/
__global__ void renderWorld(Color* fb_color, int max_x, int max_y, World& world) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
// Camera Ray
float X = 2.0f * float(i) / float(max_x) - 1;
float Y = -2.0f * float(j) / float(max_y) + 1;
Ray ray(world.camera->getRay(X, Y));
//if (pixel_index != 0) return;
// checking for Intersections
Intersection intersection;
world.intersect(intersection, ray);
if (intersection.hit()) {
Comps comps2;
comps2 = world.prepare_computations(intersection, ray);
// Color to change
Color color(0);
world.color_at2(comps2, color);
fb_color[pixel_index] = color;
}
else {
fb_color[pixel_index] = Color(0);
}
}
/*
MAIN FUNCTION -> 1st argument is supposed to be Input Scene
*/
void CUDAmain(const std::string& filename) {
clock_t start_w, stop_w;
start_w = clock();
World* world;
checkCudaErrors(cudaMallocManaged((void**)&world, sizeof(World)));
world->createWorld(filename);
stop_w = clock();
double timer_world = ((double)(stop_w - start_w)) / CLOCKS_PER_SEC;
std::cerr << "Loading World in " << timer_world << " seconds.\n";
world->print();
Triangle t1 = world->triangle_vector[0];
Triangle t2 = world->triangle_vector[1];
// image params from World
int nx = world->getWidth();
int ny = world->getHeight();
// threads & blocks
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx * ny;
size_t fb_color_size = num_pixels * sizeof(Color);
// allocate FB with color
Color* fb_color;
checkCudaErrors(cudaMallocManaged((void**)&fb_color, fb_color_size));
// utility
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
// render image
if (RENDER) renderWorld<<<blocks, threads>>>(fb_color, nx, ny, *world);
//if (RENDER) renderWorld<<<blocks, threads>>>(fb_color, nx, ny, world);
// sync
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
//__host__ void exportImg(std::string && filename)
if (SAVING) {
std::ofstream out("out.ppm");
out << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb_color[pixel_index].r);
int ig = int(255.99 * fb_color[pixel_index].g);
int ib = int(255.99 * fb_color[pixel_index].b);
out << ir << " " << ig << " " << ib << "\n";
}
}
}
// Output FB_Color as Image, change later to report std::cout
if (PRINTIMG) {
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb_color[pixel_index].r);
int ig = int(255.99 * fb_color[pixel_index].g);
int ib = int(255.99 * fb_color[pixel_index].b);
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
}
// Free allocated Memory
checkCudaErrors(cudaFree(fb_color));
checkCudaErrors(cudaFree(world));
}
void CUDAmain_run(const std::string& filename) {
// init world
clock_t start_w, stop_w;
start_w = clock();
World* world;
checkCudaErrors(cudaMallocManaged((void**)&world, sizeof(World)));
world->createWorld(filename);
stop_w = clock();
double timer_world = ((double)(stop_w - start_w)) / CLOCKS_PER_SEC;
std::cerr << "Loading World in " << timer_world << " seconds.\n";
world->print();
// allocate FB with color
Color* fb_color;
while (true) {
// image params from World
int nx = world->getWidth();
int ny = world->getHeight();
// threads & blocks
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx * ny;
size_t fb_color_size = num_pixels * sizeof(Color);
checkCudaErrors(cudaMallocManaged((void**)&fb_color, fb_color_size));
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
// render image
if (RENDER) renderWorld<<<blocks, threads>>>(fb_color, nx, ny, *world);
// sync
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
if (SAVING) {
std::ofstream out("out.ppm");
out << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb_color[pixel_index].r);
int ig = int(255.99 * fb_color[pixel_index].g);
int ib = int(255.99 * fb_color[pixel_index].b);
out << ir << " " << ig << " " << ib << "\n";
}
}
}
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
checkCudaErrors(cudaFree(fb_color));
// what next?
char input[10];
std::cerr << "What action now?\n exit: programm stops\ncamera: load new camera\nworld: reload world\n";
std::cin >> input;
if (strcmp(input, "exit") == 0) {
break;
}
if (strcmp(input, "camera") == 0) {
std::ifstream file_("camera.txt");
if (!file_.is_open()) {
printf("Camera file not found!");
break;
}
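// camera.txt format (example values are illustrative): '#' lines and blank lines are
// skipped; a "Camera:" line carries position, forward, up-guide and fov in degrees, e.g.
// Camera: 0.0 1.0 -5.0 0.0 0.0 1.0 0.0 1.0 0.0 60.0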
std::string line_;
while (getline(file_, line_)) {
if (line_[0] == '#') continue;
if (line_.empty()) continue;
std::stringstream input(line_);
std::string paramName;
input >> paramName;
if (paramName == "Camera:") {
float px, py, pz, fx, fy, fz, ux, uy, uz, fov;
input >> px >> py >> pz >> fx >> fy >> fz >> ux >> uy >> uz >> fov;
Vec4 origin(px, py, pz, 1.0f);
Vec4 forward(fx, fy, fz);
Vec4 upguide(ux, uy, uz);
fov = fov * PI / 180.f;
world->camera->createCamera(origin, forward, upguide, fov, aspectRatio);
std::cerr << "Adjusted Camera\n";
break;
}
}
}
}
// Free allocated Memory (fb_color was already freed at the end of each loop iteration)
checkCudaErrors(cudaFree(world));
}
int main(int argc, char* argv[]) {
if (RUN) {
std::string filename = "Scene.txt";
CUDAmain_run2(filename);
return 0;
}
if (DEBUG) {
std::string texturepath = "textures/sls_interior.tga";
Texture t(texturepath,0);
std::string filename = "textures/copy.tga";
t.exportImg("textures/copy.tga");
return 0;
}
if (RUN) {
std::string filename = "Scene.txt";
CUDAmain_run(filename);
return 0;
}
if (false) {
if (argc == 1) {
printf("No Input File was specified!\n");
return 0;
}
std::string filename = argv[1];
CUDAmain(filename);
}
else {
std::string filename = "Scene.txt";
CUDAmain(filename);
}
return 0;
}
|
61258b10507f0f15486eb701dfe562a9e99a5336.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define USE_LDG
#include "diffusion_cuda_shared.h"
#include "common/cuda_util.h"
namespace diffusion {
namespace cuda_shared3 {
#define GET(x) (x)
/*
Hoists boundary conditions out of the z-direction loop. Three
top-level conditional blocks, one corresponding to the horizontal
row at y == 0, another to the horizontal row at y == dimy-1, and the
other to the rest. The first section takes care of loading halos at
the x-direction. The y-direction halos are not cached for
simplicity, and it is expected not to have much performance
difference.
*/
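/*
  Shared-memory sizing: sb is addressed as a flat (blockDim.x+2) x blockDim.y
  array (row stride sbx), so the dynamic shared-memory argument passed by
  RunKernel below is (block_x_+2) * block_y_ * sizeof(float). As an example,
  a hypothetical 32x8 thread block would need 34 * 8 floats = 1088 bytes.
*/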
__global__ void kernel3d(F1_DECL f1, F2_DECL f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
// shared memory shape is (dimx+2) * dimy. Halo for y dir is not
// cached.
extern __shared__ REAL sb[];
const int sbx = blockDim.x+2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int xy = nx * ny;
const int block_z = nz / gridDim.z;
int k = block_z * blockIdx.z;
const int k_end = k + block_z;
int p = OFFSET3D(i, j, k, nx, ny);
int ps = threadIdx.x+1 + threadIdx.y * sbx;
float t1, t2, t3;
int s = (j == 0) ? 0 : -nx;
int n = (j == ny-1) ? 0 : nx;
t3 = GET(f1[p]);
t2 = (k == 0) ? t3 : GET(f1[p-xy]);
// Move out the boundary conditions from the loop body
if (threadIdx.y == 0) {
// the threads at row y == 0 also take care of loading vertical
// halos at x == 0 and x == blockDim.x - 1
int w = (blockIdx.x == 0) ? 0 : -1;
int e = (blockIdx.x == gridDim.x-1) ? 0 : 1;
int h = (threadIdx.x < blockDim.y) ? w : (blockDim.x - 1 + e);
h = - threadIdx.x + h + (threadIdx.x & (blockDim.y-1)) * nx;
int sbt = (threadIdx.x & (blockDim.y-1)) * sbx;
// the latter half takes care of the east boundary
if (threadIdx.x >= blockDim.y) sbt += sbx-1;
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
t3 = GET(f1[p+xy]);
sb[ps] = t2;
if (threadIdx.x < blockDim.y*2) {
sb[sbt] = LDG(f1 + p+h);
}
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs*GET(f1[p+s]) + cn*sb[ps+sbx] + cb*t1 + ct*t3;
p += xy;
__syncthreads();
}
t1 = t2;
t2 = t3;
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
if (threadIdx.x < blockDim.y*2) {
sb[sbt] = LDG(f1 + p+h);
}
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb * t1 + ct * t3;
} else if (threadIdx.y == blockDim.y - 1) {
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
//t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
t3 = GET(f1[p+xy]);
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]
+ cn * GET(f1[p+n])
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
}
t1 = t2;
t2 = t3;
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * GET(f1[p+n]) + cb * t1 + ct * t3;
} else {
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
t3 = GET(f1[p+xy]);
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
}
t1 = t2;
t2 = t3;
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3;
}
return;
}
} // namespace cuda_shared3
void DiffusionCUDAShared3::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_);
if (ndim_ == 3) grid_dim.z = grid_z_;
CHECK_CUDA(hipEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
hipLaunchKernelGGL(( cuda_shared3::kernel3d), dim3(grid_dim), dim3(block_dim),
(block_x_+2)*(block_y_)*sizeof(float), 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(hipEventRecord(ev2_));
FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
void DiffusionCUDAShared3::Setup() {
DiffusionCUDA::Setup();
FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_shared3::kernel3d,
hipFuncCachePreferShared));
}
} // namespace diffusion
|
61258b10507f0f15486eb701dfe562a9e99a5336.cu
|
#define USE_LDG
#include "diffusion_cuda_shared.h"
#include "common/cuda_util.h"
namespace diffusion {
namespace cuda_shared3 {
#define GET(x) (x)
/*
Hoists boundary conditions out of the z-direction loop. Three
top-level conditional blocks, one corresponding to the horizontal
row at y == 0, another to the horizontal row at y == dimy-1, and the
other to the rest. The first section takes care of loading halos at
the x-direction. The y-direction halos are not cached for
simplicity, and it is expected not to have much performance
difference.
*/
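/*
  Shared-memory sizing: sb is addressed as a flat (blockDim.x+2) x blockDim.y
  array (row stride sbx), so the dynamic shared-memory argument passed by
  RunKernel below is (block_x_+2) * block_y_ * sizeof(float). As an example,
  a hypothetical 32x8 thread block would need 34 * 8 floats = 1088 bytes.
*/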
__global__ void kernel3d(F1_DECL f1, F2_DECL f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
// shared memory shape is (dimx+2) * dimy. Halo for y dir is not
// cached.
extern __shared__ REAL sb[];
const int sbx = blockDim.x+2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int xy = nx * ny;
const int block_z = nz / gridDim.z;
int k = block_z * blockIdx.z;
const int k_end = k + block_z;
int p = OFFSET3D(i, j, k, nx, ny);
int ps = threadIdx.x+1 + threadIdx.y * sbx;
float t1, t2, t3;
int s = (j == 0) ? 0 : -nx;
int n = (j == ny-1) ? 0 : nx;
t3 = GET(f1[p]);
t2 = (k == 0) ? t3 : GET(f1[p-xy]);
// Move out the boundary conditions from the loop body
if (threadIdx.y == 0) {
// the threads at row y == 0 also take care of loading vertical
// halos at x == 0 and x == blockDim.x - 1
int w = (blockIdx.x == 0) ? 0 : -1;
int e = (blockIdx.x == gridDim.x-1) ? 0 : 1;
int h = (threadIdx.x < blockDim.y) ? w : (blockDim.x - 1 + e);
h = - threadIdx.x + h + (threadIdx.x & (blockDim.y-1)) * nx;
int sbt = (threadIdx.x & (blockDim.y-1)) * sbx;
// the latter half takes care of the east boundary
if (threadIdx.x >= blockDim.y) sbt += sbx-1;
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
t3 = GET(f1[p+xy]);
sb[ps] = t2;
if (threadIdx.x < blockDim.y*2) {
sb[sbt] = LDG(f1 + p+h);
}
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs*GET(f1[p+s]) + cn*sb[ps+sbx] + cb*t1 + ct*t3;
p += xy;
__syncthreads();
}
t1 = t2;
t2 = t3;
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
if (threadIdx.x < blockDim.y*2) {
sb[sbt] = LDG(f1 + p+h);
}
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb * t1 + ct * t3;
} else if (threadIdx.y == blockDim.y - 1) {
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
//t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
t3 = GET(f1[p+xy]);
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]
+ cn * GET(f1[p+n])
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
}
t1 = t2;
t2 = t3;
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * GET(f1[p+n]) + cb * t1 + ct * t3;
} else {
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
t3 = GET(f1[p+xy]);
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
}
t1 = t2;
t2 = t3;
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
__syncthreads();
f2[p] = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3;
}
return;
}
} // namespace cuda_shared3
void DiffusionCUDAShared3::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_);
if (ndim_ == 3) grid_dim.z = grid_z_;
CHECK_CUDA(cudaEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
cuda_shared3::kernel3d<<<grid_dim, block_dim,
(block_x_+2)*(block_y_)*sizeof(float)>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(cudaEventRecord(ev2_));
FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
void DiffusionCUDAShared3::Setup() {
DiffusionCUDA::Setup();
FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_shared3::kernel3d,
cudaFuncCachePreferShared));
}
} // namespace diffusion
|
b0ad1f9df180b5c7468fadd5209b25bb73111c77.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <experimental/include_cuco_static_map.cuh>
#include <experimental/detail/graph_utils.cuh>
#include <experimental/graph.hpp>
#include <experimental/graph_functions.hpp>
#include <utilities/collect_comm.cuh>
#include <utilities/error.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cugraph {
namespace experimental {
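/*
  renumber_ext_vertices maps external vertex IDs to internal (renumbered) IDs in
  place: renumber_map_labels[i - local_int_vertex_first] is the external label
  owned by internal vertex i, and each entry of `vertices` is replaced by the
  internal ID its external label maps to (invalid IDs pass through unchanged).
  Illustrative example (hypothetical values): with renumber_map_labels = {5, 9, 2}
  and local internal range [0, 3), the external IDs {9, 2, 5} become {1, 2, 0}.
*/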
template <typename vertex_t, bool multi_gpu>
void renumber_ext_vertices(raft::handle_t const& handle,
vertex_t* vertices /* [INOUT] */,
size_t num_vertices,
vertex_t const* renumber_map_labels,
vertex_t local_int_vertex_first,
vertex_t local_int_vertex_last,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"renumber_vertices() not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (do_expensive_check) {
rmm::device_uvector<vertex_t> labels(local_int_vertex_last - local_int_vertex_first,
handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
renumber_map_labels,
renumber_map_labels + labels.size(),
labels.begin());
thrust::sort(
rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), labels.begin(), labels.end());
CUGRAPH_EXPECTS(thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels.begin(),
labels.end()) == labels.end(),
"Invalid input arguments: renumber_map_labels have duplicate elements.");
}
auto renumber_map_ptr = std::make_unique<cuco::static_map<vertex_t, vertex_t>>(
size_t{0}, invalid_vertex_id<vertex_t>::value, invalid_vertex_id<vertex_t>::value);
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
rmm::device_uvector<vertex_t> sorted_unique_ext_vertices(num_vertices, handle.get_stream());
sorted_unique_ext_vertices.resize(
thrust::distance(
sorted_unique_ext_vertices.begin(),
thrust::copy_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
sorted_unique_ext_vertices.begin(),
[] __device__(auto v) { return v != invalid_vertex_id<vertex_t>::value; })),
handle.get_stream());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_ext_vertices.begin(),
sorted_unique_ext_vertices.end());
sorted_unique_ext_vertices.resize(
thrust::distance(
sorted_unique_ext_vertices.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_ext_vertices.begin(),
sorted_unique_ext_vertices.end())),
handle.get_stream());
auto int_vertices_for_sorted_unique_ext_vertices = collect_values_for_unique_keys(
comm,
renumber_map_labels,
renumber_map_labels + (local_int_vertex_last - local_int_vertex_first),
thrust::make_counting_iterator(local_int_vertex_first),
sorted_unique_ext_vertices.begin(),
sorted_unique_ext_vertices.end(),
detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size},
handle.get_stream());
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
renumber_map_ptr.reset();
renumber_map_ptr = std::make_unique<cuco::static_map<vertex_t, vertex_t>>(
// FIXME: ::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
::max(
static_cast<size_t>(static_cast<double>(sorted_unique_ext_vertices.size()) / load_factor),
sorted_unique_ext_vertices.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto kv_pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(
sorted_unique_ext_vertices.begin(), int_vertices_for_sorted_unique_ext_vertices.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (sorted_unique_ext_vertices.size()) {
renumber_map_ptr->insert(kv_pair_first, kv_pair_first + sorted_unique_ext_vertices.size());
}
} else {
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
renumber_map_ptr.reset();
renumber_map_ptr = std::make_unique<cuco::static_map<vertex_t, vertex_t>>(
// FIXME: ::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
::max(static_cast<size_t>(
static_cast<double>(local_int_vertex_last - local_int_vertex_first) / load_factor),
static_cast<size_t>(local_int_vertex_last - local_int_vertex_first) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(renumber_map_labels, thrust::make_counting_iterator(vertex_t{0}))),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if ((local_int_vertex_last - local_int_vertex_first) > 0) {
renumber_map_ptr->insert(pair_first,
pair_first + (local_int_vertex_last - local_int_vertex_first));
}
}
if (do_expensive_check) {
rmm::device_uvector<bool> contains(num_vertices, handle.get_stream());
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (num_vertices > 0) {
renumber_map_ptr->contains(vertices, vertices + num_vertices, contains.begin());
}
auto vc_pair_first = thrust::make_zip_iterator(thrust::make_tuple(vertices, contains.begin()));
CUGRAPH_EXPECTS(thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vc_pair_first,
vc_pair_first + num_vertices,
[] __device__(auto pair) {
auto v = thrust::get<0>(pair);
auto c = thrust::get<1>(pair);
return v == invalid_vertex_id<vertex_t>::value
? (c == true)
: (c == false);
}) == 0,
"Invalid input arguments: vertices have elements that are missing in "
"(aggregate) renumber_map_labels.");
}
// FIXME: a temporary workaround for https://github.com/NVIDIA/cuCollections/issues/74
#if 1
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
vertices,
[view = renumber_map_ptr->get_device_view()] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value
? view.find(v)->second.load(cuda::std::memory_order_relaxed)
: invalid_vertex_id<vertex_t>::value;
});
#else
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (num_vertices > 0) { renumber_map_ptr->find(vertices, vertices + num_vertices, vertices); }
#endif
#endif
}
template <typename vertex_t>
void unrenumber_local_int_vertices(
raft::handle_t const& handle,
vertex_t* vertices /* [INOUT] */,
size_t num_vertices,
vertex_t const* renumber_map_labels /* size = local_int_vertex_last - local_int_vertex_first */,
vertex_t local_int_vertex_first,
vertex_t local_int_vertex_last,
bool do_expensive_check)
{
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"unrenumber_local_vertices() not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (do_expensive_check) {
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
[local_int_vertex_first, local_int_vertex_last] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value &&
(v < local_int_vertex_first || v >= local_int_vertex_last);
}) == 0,
"Invalid input arguments: there are non-local vertices in [vertices, vertices "
"+ num_vertices).");
}
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
vertices,
[renumber_map_labels, local_int_vertex_first] __device__(auto v) {
return v == invalid_vertex_id<vertex_t>::value
? v
: renumber_map_labels[v - local_int_vertex_first];
});
#endif
}
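/*
  unrenumber_int_vertices is the inverse mapping for internal IDs that may be
  owned by other GPUs: the unique internal IDs are bucketed by
  vertex_partition_lasts, shuffled to their owning ranks, translated there via
  renumber_map_labels, shuffled back, and finally applied in place through a
  cuco::static_map lookup (the single-GPU case falls back to
  unrenumber_local_int_vertices).
*/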
template <typename vertex_t, bool multi_gpu>
void unrenumber_int_vertices(raft::handle_t const& handle,
vertex_t* vertices /* [INOUT] */,
size_t num_vertices,
vertex_t const* renumber_map_labels,
vertex_t local_int_vertex_first,
vertex_t local_int_vertex_last,
std::vector<vertex_t>& vertex_partition_lasts,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"unrenumber_vertices() not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (do_expensive_check) {
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
[int_vertex_last = vertex_partition_lasts.back()] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value &&
!is_valid_vertex(int_vertex_last, v);
}) == 0,
"Invalid input arguments: there are out-of-range vertices in [vertices, vertices "
"+ num_vertices).");
}
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
rmm::device_uvector<vertex_t> sorted_unique_int_vertices(num_vertices, handle.get_stream());
sorted_unique_int_vertices.resize(
thrust::distance(
sorted_unique_int_vertices.begin(),
thrust::copy_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
sorted_unique_int_vertices.begin(),
[] __device__(auto v) { return v != invalid_vertex_id<vertex_t>::value; })),
handle.get_stream());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_int_vertices.begin(),
sorted_unique_int_vertices.end());
sorted_unique_int_vertices.resize(
thrust::distance(
sorted_unique_int_vertices.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_int_vertices.begin(),
sorted_unique_int_vertices.end())),
handle.get_stream());
rmm::device_uvector<vertex_t> d_vertex_partition_lasts(vertex_partition_lasts.size(),
handle.get_stream());
raft::update_device(d_vertex_partition_lasts.data(),
vertex_partition_lasts.data(),
vertex_partition_lasts.size(),
handle.get_stream());
rmm::device_uvector<size_t> d_tx_int_vertex_offsets(d_vertex_partition_lasts.size(),
handle.get_stream());
thrust::lower_bound(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_int_vertices.begin(),
sorted_unique_int_vertices.end(),
d_vertex_partition_lasts.begin(),
d_vertex_partition_lasts.end(),
d_tx_int_vertex_offsets.begin());
std::vector<size_t> h_tx_int_vertex_counts(d_tx_int_vertex_offsets.size());
raft::update_host(h_tx_int_vertex_counts.data(),
d_tx_int_vertex_offsets.data(),
d_tx_int_vertex_offsets.size(),
handle.get_stream());
handle.get_stream_view().synchronize();
std::adjacent_difference(
h_tx_int_vertex_counts.begin(), h_tx_int_vertex_counts.end(), h_tx_int_vertex_counts.begin());
rmm::device_uvector<vertex_t> rx_int_vertices(0, handle.get_stream());
std::vector<size_t> rx_int_vertex_counts{};
std::tie(rx_int_vertices, rx_int_vertex_counts) = shuffle_values(
comm, sorted_unique_int_vertices.begin(), h_tx_int_vertex_counts, handle.get_stream());
auto tx_ext_vertices = std::move(rx_int_vertices);
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
tx_ext_vertices.begin(),
tx_ext_vertices.end(),
tx_ext_vertices.begin(),
[renumber_map_labels, local_int_vertex_first] __device__(auto v) {
return renumber_map_labels[v - local_int_vertex_first];
});
rmm::device_uvector<vertex_t> rx_ext_vertices_for_sorted_unique_int_vertices(
0, handle.get_stream());
std::tie(rx_ext_vertices_for_sorted_unique_int_vertices, std::ignore) =
shuffle_values(comm, tx_ext_vertices.begin(), rx_int_vertex_counts, handle.get_stream());
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
cuco::static_map<vertex_t, vertex_t> unrenumber_map(
// FIXME: ::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
::max(
static_cast<size_t>(static_cast<double>(sorted_unique_int_vertices.size()) / load_factor),
sorted_unique_int_vertices.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(sorted_unique_int_vertices.begin(),
rx_ext_vertices_for_sorted_unique_int_vertices.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (sorted_unique_int_vertices.size()) {
unrenumber_map.insert(pair_first, pair_first + sorted_unique_int_vertices.size());
}
// FIXME: a temporary workaround for https://github.com/NVIDIA/cuCollections/issues/74
#if 1
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
vertices,
[view = unrenumber_map.get_device_view()] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value
? view.find(v)->second.load(cuda::std::memory_order_relaxed)
: invalid_vertex_id<vertex_t>::value;
});
#else
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (num_vertices > 0) { unrenumber_map.find(vertices, vertices + num_vertices, vertices); }
#endif
} else {
unrenumber_local_int_vertices(handle,
vertices,
num_vertices,
renumber_map_labels,
local_int_vertex_first,
local_int_vertex_last,
do_expensive_check);
}
#endif
}
// explicit instantiation
template void renumber_ext_vertices<int32_t, false>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
bool do_expensive_check);
template void renumber_ext_vertices<int32_t, true>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
bool do_expensive_check);
template void renumber_ext_vertices<int64_t, false>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
bool do_expensive_check);
template void renumber_ext_vertices<int64_t, true>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
bool do_expensive_check);
template void unrenumber_local_int_vertices<int32_t>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
bool do_expensive_check);
template void unrenumber_local_int_vertices<int64_t>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
bool do_expensive_check);
template void unrenumber_int_vertices<int32_t, false>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
std::vector<int32_t>& vertex_partition_lasts,
bool do_expensive_check);
template void unrenumber_int_vertices<int32_t, true>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
std::vector<int32_t>& vertex_partition_lasts,
bool do_expensive_check);
template void unrenumber_int_vertices<int64_t, false>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
std::vector<int64_t>& vertex_partition_lasts,
bool do_expensive_check);
template void unrenumber_int_vertices<int64_t, true>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
std::vector<int64_t>& vertex_partition_lasts,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
b0ad1f9df180b5c7468fadd5209b25bb73111c77.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <experimental/include_cuco_static_map.cuh>
#include <experimental/detail/graph_utils.cuh>
#include <experimental/graph.hpp>
#include <experimental/graph_functions.hpp>
#include <utilities/collect_comm.cuh>
#include <utilities/error.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cugraph {
namespace experimental {
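/*
  renumber_ext_vertices maps external vertex IDs to internal (renumbered) IDs in
  place: renumber_map_labels[i - local_int_vertex_first] is the external label
  owned by internal vertex i, and each entry of `vertices` is replaced by the
  internal ID its external label maps to (invalid IDs pass through unchanged).
  Illustrative example (hypothetical values): with renumber_map_labels = {5, 9, 2}
  and local internal range [0, 3), the external IDs {9, 2, 5} become {1, 2, 0}.
*/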
template <typename vertex_t, bool multi_gpu>
void renumber_ext_vertices(raft::handle_t const& handle,
vertex_t* vertices /* [INOUT] */,
size_t num_vertices,
vertex_t const* renumber_map_labels,
vertex_t local_int_vertex_first,
vertex_t local_int_vertex_last,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"renumber_vertices() not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (do_expensive_check) {
rmm::device_uvector<vertex_t> labels(local_int_vertex_last - local_int_vertex_first,
handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
renumber_map_labels,
renumber_map_labels + labels.size(),
labels.begin());
thrust::sort(
rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), labels.begin(), labels.end());
CUGRAPH_EXPECTS(thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels.begin(),
labels.end()) == labels.end(),
"Invalid input arguments: renumber_map_labels have duplicate elements.");
}
auto renumber_map_ptr = std::make_unique<cuco::static_map<vertex_t, vertex_t>>(
size_t{0}, invalid_vertex_id<vertex_t>::value, invalid_vertex_id<vertex_t>::value);
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
rmm::device_uvector<vertex_t> sorted_unique_ext_vertices(num_vertices, handle.get_stream());
sorted_unique_ext_vertices.resize(
thrust::distance(
sorted_unique_ext_vertices.begin(),
thrust::copy_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
sorted_unique_ext_vertices.begin(),
[] __device__(auto v) { return v != invalid_vertex_id<vertex_t>::value; })),
handle.get_stream());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_ext_vertices.begin(),
sorted_unique_ext_vertices.end());
sorted_unique_ext_vertices.resize(
thrust::distance(
sorted_unique_ext_vertices.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_ext_vertices.begin(),
sorted_unique_ext_vertices.end())),
handle.get_stream());
auto int_vertices_for_sorted_unique_ext_vertices = collect_values_for_unique_keys(
comm,
renumber_map_labels,
renumber_map_labels + (local_int_vertex_last - local_int_vertex_first),
thrust::make_counting_iterator(local_int_vertex_first),
sorted_unique_ext_vertices.begin(),
sorted_unique_ext_vertices.end(),
detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size},
handle.get_stream());
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
renumber_map_ptr.reset();
renumber_map_ptr = std::make_unique<cuco::static_map<vertex_t, vertex_t>>(
// FIXME: std::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
std::max(
static_cast<size_t>(static_cast<double>(sorted_unique_ext_vertices.size()) / load_factor),
sorted_unique_ext_vertices.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto kv_pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(
sorted_unique_ext_vertices.begin(), int_vertices_for_sorted_unique_ext_vertices.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (sorted_unique_ext_vertices.size()) {
renumber_map_ptr->insert(kv_pair_first, kv_pair_first + sorted_unique_ext_vertices.size());
}
} else {
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
renumber_map_ptr.reset();
renumber_map_ptr = std::make_unique<cuco::static_map<vertex_t, vertex_t>>(
// FIXME: std::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
std::max(static_cast<size_t>(
static_cast<double>(local_int_vertex_last - local_int_vertex_first) / load_factor),
static_cast<size_t>(local_int_vertex_last - local_int_vertex_first) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(renumber_map_labels, thrust::make_counting_iterator(vertex_t{0}))),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if ((local_int_vertex_last - local_int_vertex_first) > 0) {
renumber_map_ptr->insert(pair_first,
pair_first + (local_int_vertex_last - local_int_vertex_first));
}
}
if (do_expensive_check) {
rmm::device_uvector<bool> contains(num_vertices, handle.get_stream());
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (num_vertices > 0) {
renumber_map_ptr->contains(vertices, vertices + num_vertices, contains.begin());
}
auto vc_pair_first = thrust::make_zip_iterator(thrust::make_tuple(vertices, contains.begin()));
CUGRAPH_EXPECTS(thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vc_pair_first,
vc_pair_first + num_vertices,
[] __device__(auto pair) {
auto v = thrust::get<0>(pair);
auto c = thrust::get<1>(pair);
return v == invalid_vertex_id<vertex_t>::value
? (c == true)
: (c == false);
}) == 0,
"Invalid input arguments: vertices have elements that are missing in "
"(aggregate) renumber_map_labels.");
}
// FIXME: a temporary workaround for https://github.com/NVIDIA/cuCollections/issues/74
#if 1
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
vertices,
[view = renumber_map_ptr->get_device_view()] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value
? view.find(v)->second.load(cuda::std::memory_order_relaxed)
: invalid_vertex_id<vertex_t>::value;
});
#else
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (num_vertices > 0) { renumber_map_ptr->find(vertices, vertices + num_vertices, vertices); }
#endif
#endif
}
template <typename vertex_t>
void unrenumber_local_int_vertices(
raft::handle_t const& handle,
vertex_t* vertices /* [INOUT] */,
size_t num_vertices,
vertex_t const* renumber_map_labels /* size = local_int_vertex_last - local_int_vertex_first */,
vertex_t local_int_vertex_first,
vertex_t local_int_vertex_last,
bool do_expensive_check)
{
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"unrenumber_local_vertices() not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (do_expensive_check) {
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
[local_int_vertex_first, local_int_vertex_last] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value &&
(v < local_int_vertex_first || v >= local_int_vertex_last);
}) == 0,
"Invalid input arguments: there are non-local vertices in [vertices, vertices "
"+ num_vertices).");
}
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
vertices,
[renumber_map_labels, local_int_vertex_first] __device__(auto v) {
return v == invalid_vertex_id<vertex_t>::value
? v
: renumber_map_labels[v - local_int_vertex_first];
});
#endif
}
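/*
  unrenumber_int_vertices is the inverse mapping for internal IDs that may be
  owned by other GPUs: the unique internal IDs are bucketed by
  vertex_partition_lasts, shuffled to their owning ranks, translated there via
  renumber_map_labels, shuffled back, and finally applied in place through a
  cuco::static_map lookup (the single-GPU case falls back to
  unrenumber_local_int_vertices).
*/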
template <typename vertex_t, bool multi_gpu>
void unrenumber_int_vertices(raft::handle_t const& handle,
vertex_t* vertices /* [INOUT] */,
size_t num_vertices,
vertex_t const* renumber_map_labels,
vertex_t local_int_vertex_first,
vertex_t local_int_vertex_last,
std::vector<vertex_t>& vertex_partition_lasts,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"unrenumber_vertices() not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (do_expensive_check) {
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
[int_vertex_last = vertex_partition_lasts.back()] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value &&
!is_valid_vertex(int_vertex_last, v);
}) == 0,
"Invalid input arguments: there are out-of-range vertices in [vertices, vertices "
"+ num_vertices).");
}
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
rmm::device_uvector<vertex_t> sorted_unique_int_vertices(num_vertices, handle.get_stream());
sorted_unique_int_vertices.resize(
thrust::distance(
sorted_unique_int_vertices.begin(),
thrust::copy_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
sorted_unique_int_vertices.begin(),
[] __device__(auto v) { return v != invalid_vertex_id<vertex_t>::value; })),
handle.get_stream());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_int_vertices.begin(),
sorted_unique_int_vertices.end());
sorted_unique_int_vertices.resize(
thrust::distance(
sorted_unique_int_vertices.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_int_vertices.begin(),
sorted_unique_int_vertices.end())),
handle.get_stream());
rmm::device_uvector<vertex_t> d_vertex_partition_lasts(vertex_partition_lasts.size(),
handle.get_stream());
raft::update_device(d_vertex_partition_lasts.data(),
vertex_partition_lasts.data(),
vertex_partition_lasts.size(),
handle.get_stream());
rmm::device_uvector<size_t> d_tx_int_vertex_offsets(d_vertex_partition_lasts.size(),
handle.get_stream());
thrust::lower_bound(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_unique_int_vertices.begin(),
sorted_unique_int_vertices.end(),
d_vertex_partition_lasts.begin(),
d_vertex_partition_lasts.end(),
d_tx_int_vertex_offsets.begin());
std::vector<size_t> h_tx_int_vertex_counts(d_tx_int_vertex_offsets.size());
raft::update_host(h_tx_int_vertex_counts.data(),
d_tx_int_vertex_offsets.data(),
d_tx_int_vertex_offsets.size(),
handle.get_stream());
handle.get_stream_view().synchronize();
std::adjacent_difference(
h_tx_int_vertex_counts.begin(), h_tx_int_vertex_counts.end(), h_tx_int_vertex_counts.begin());
rmm::device_uvector<vertex_t> rx_int_vertices(0, handle.get_stream());
std::vector<size_t> rx_int_vertex_counts{};
std::tie(rx_int_vertices, rx_int_vertex_counts) = shuffle_values(
comm, sorted_unique_int_vertices.begin(), h_tx_int_vertex_counts, handle.get_stream());
auto tx_ext_vertices = std::move(rx_int_vertices);
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
tx_ext_vertices.begin(),
tx_ext_vertices.end(),
tx_ext_vertices.begin(),
[renumber_map_labels, local_int_vertex_first] __device__(auto v) {
return renumber_map_labels[v - local_int_vertex_first];
});
rmm::device_uvector<vertex_t> rx_ext_vertices_for_sorted_unique_int_vertices(
0, handle.get_stream());
std::tie(rx_ext_vertices_for_sorted_unique_int_vertices, std::ignore) =
shuffle_values(comm, tx_ext_vertices.begin(), rx_int_vertex_counts, handle.get_stream());
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
cuco::static_map<vertex_t, vertex_t> unrenumber_map(
// FIXME: std::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
std::max(
static_cast<size_t>(static_cast<double>(sorted_unique_int_vertices.size()) / load_factor),
sorted_unique_int_vertices.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(sorted_unique_int_vertices.begin(),
rx_ext_vertices_for_sorted_unique_int_vertices.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (sorted_unique_int_vertices.size()) {
unrenumber_map.insert(pair_first, pair_first + sorted_unique_int_vertices.size());
}
// FIXME: a temporary workaround for https://github.com/NVIDIA/cuCollections/issues/74
#if 1
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_vertices,
vertices,
[view = unrenumber_map.get_device_view()] __device__(auto v) {
return v != invalid_vertex_id<vertex_t>::value
? view.find(v)->second.load(cuda::std::memory_order_relaxed)
: invalid_vertex_id<vertex_t>::value;
});
#else
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvalidConfiguration.
if (num_vertices > 0) { unrenumber_map.find(vertices, vertices + num_vertices, vertices); }
#endif
} else {
unrenumber_local_int_vertices(handle,
vertices,
num_vertices,
renumber_map_labels,
local_int_vertex_first,
local_int_vertex_last,
do_expensive_check);
}
#endif
}
// explicit instantiation
template void renumber_ext_vertices<int32_t, false>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
bool do_expensive_check);
template void renumber_ext_vertices<int32_t, true>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
bool do_expensive_check);
template void renumber_ext_vertices<int64_t, false>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
bool do_expensive_check);
template void renumber_ext_vertices<int64_t, true>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
bool do_expensive_check);
template void unrenumber_local_int_vertices<int32_t>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
bool do_expensive_check);
template void unrenumber_local_int_vertices<int64_t>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
bool do_expensive_check);
template void unrenumber_int_vertices<int32_t, false>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
std::vector<int32_t>& vertex_partition_lasts,
bool do_expensive_check);
template void unrenumber_int_vertices<int32_t, true>(raft::handle_t const& handle,
int32_t* vertices,
size_t num_vertices,
int32_t const* renumber_map_labels,
int32_t local_int_vertex_first,
int32_t local_int_vertex_last,
std::vector<int32_t>& vertex_partition_lasts,
bool do_expensive_check);
template void unrenumber_int_vertices<int64_t, false>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
std::vector<int64_t>& vertex_partition_lasts,
bool do_expensive_check);
template void unrenumber_int_vertices<int64_t, true>(raft::handle_t const& handle,
int64_t* vertices,
size_t num_vertices,
int64_t const* renumber_map_labels,
int64_t local_int_vertex_first,
int64_t local_int_vertex_last,
std::vector<int64_t>& vertex_partition_lasts,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
7056d7df6a3b4cebc12cd5b6304e12351a92dd59.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip_runtime.h"
//works for complex planar to complex planar; T should be float or double
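// Example instantiation (illustrative): with a 16x16 work group and 4x4 micro tiles,
// i.e. transpose_kernel_outplace_complex_planar_to_complex_planar<float, 4, 4, 16, 16>,
// the macro tile is 64x64, reshape_factor = 4 and unroll_factor = 16, matching the
// worked numbers in the comments inside the kernel.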
template<typename T, int micro_tile_col_size, int micro_tile_row_size, int wg_col_size, int wg_row_size>
__global__ void transpose_kernel_outplace_complex_planar_to_complex_planar(hipLaunchParm lp,
T *input_matrix_real,
T *input_matrix_imag,
T *output_matrix_real,
T *output_matrix_imag,
size_t input_row_size,
size_t input_col_size,
size_t input_leading_dim_size,
size_t output_leading_dim_size,
size_t batch_size)
{
// WG size can be assumed to be 16 by 16 in most cases
size_t local_idx_0 = hipThreadIdx_x;// 0-15
size_t local_idx_1 = hipThreadIdx_y;// 0-15
size_t block_idx_0 = hipBlockIdx_x;// index of work groups
size_t block_idx_1 = hipBlockIdx_y;
size_t block_dim_0 = hipBlockDim_x;// size of work groups 16
size_t block_dim_1 = hipBlockDim_y;// size of work groups 16
size_t grid_dim_0 = hipGridDim_x;// number of blocks; only the first grid dimension is used to handle the batch size
// for 64 x 64 macro tile size we will need 16 x 4 x 64 blocks (4 x 64 == 16 x 16)
// for 32 x 32 macro tile size we will need 4 x 8 x 32 blocks
const size_t macro_tile_col_size = micro_tile_col_size * wg_col_size;
const size_t macro_tile_row_size = micro_tile_row_size * wg_row_size;
const size_t reshape_factor = macro_tile_col_size / block_dim_0; // 64 / 16 = 4 need to fit 4 rows into one row in LDS; 32 / 16 = 2
const size_t unroll_factor = macro_tile_row_size / (block_dim_1 / reshape_factor); // 64 / (16 / 4) = 16; 32 / (16 / 2) = 4
__shared__ T lds_real[macro_tile_row_size][macro_tile_col_size];
__shared__ T lds_imag[macro_tile_row_size][macro_tile_col_size];
size_t blocks_per_batch = grid_dim_0 / batch_size;
size_t batch_idx = block_idx_0 / blocks_per_batch;
input_matrix_real += batch_idx * input_leading_dim_size * input_row_size;
input_matrix_imag += batch_idx * input_leading_dim_size * input_row_size;
size_t input_offset = 0;
input_offset += input_leading_dim_size * block_idx_1 * macro_tile_row_size;
input_offset += (block_idx_0 % blocks_per_batch) * macro_tile_col_size;
input_matrix_real += input_offset;
input_matrix_imag += input_offset;
for(int i = 0; i < unroll_factor; i++)
{
//each iteration 256 work items will read from a 4 x 64 subblock
//there are 16 iterations
size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0; // local_idx_0 + (local_idx_1 % 4) * 16
size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);
//transpose happened here
lds_real[subblock_idx_0][subblock_idx_1] = input_matrix_real[subblock_idx_1 * input_leading_dim_size + subblock_idx_0];
lds_imag[subblock_idx_0][subblock_idx_1] = input_matrix_imag[subblock_idx_1 * input_leading_dim_size + subblock_idx_0];
}
__syncthreads();
output_matrix_real += batch_idx * input_col_size * output_leading_dim_size;
output_matrix_imag += batch_idx * input_col_size * output_leading_dim_size;
size_t output_offset = 0;
output_offset += output_leading_dim_size * (block_idx_0 % blocks_per_batch) * macro_tile_row_size;//input_row_size == output_col_size
output_offset += block_idx_1 * macro_tile_col_size;
output_matrix_real += output_offset;
output_matrix_imag += output_offset;
for(int i = 0; i < unroll_factor; i++)
{
size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0;// 0-63
size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);// 0-3, 4-7 ... 60-63
output_matrix_real[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = lds_real[subblock_idx_1][subblock_idx_0];
output_matrix_imag[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = lds_imag[subblock_idx_1][subblock_idx_0];
}
}
|
7056d7df6a3b4cebc12cd5b6304e12351a92dd59.cu
|
#include "hip_runtime.h"
//works for complex planar to complex planar; T should be float or double
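// Example instantiation (illustrative): with a 16x16 work group and 4x4 micro tiles,
// i.e. transpose_kernel_outplace_complex_planar_to_complex_planar<float, 4, 4, 16, 16>,
// the macro tile is 64x64, reshape_factor = 4 and unroll_factor = 16, matching the
// worked numbers in the comments inside the kernel.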
template<typename T, int micro_tile_col_size, int micro_tile_row_size, int wg_col_size, int wg_row_size>
__global__ void transpose_kernel_outplace_complex_planar_to_complex_planar(hipLaunchParm lp,
T *input_matrix_real,
T *input_matrix_imag,
T *output_matrix_real,
T *output_matrix_imag,
size_t input_row_size,
size_t input_col_size,
size_t input_leading_dim_size,
size_t output_leading_dim_size,
size_t batch_size)
{
// WG size can be assumed to be 16 by 16 in most cases
size_t local_idx_0 = hipThreadIdx_x;// 0-15
size_t local_idx_1 = hipThreadIdx_y;// 0-15
size_t block_idx_0 = hipBlockIdx_x;// index of work groups
size_t block_idx_1 = hipBlockIdx_y;
size_t block_dim_0 = hipBlockDim_x;// size of work groups 16
size_t block_dim_1 = hipBlockDim_y;// size of work groups 16
size_t grid_dim_0 = hipGridDim_x;// number of blocks; only the first grid dimension is used to handle the batch size
// for 64 x 64 macro tile size we will need 16 x 4 x 64 blocks (4 x 64 == 16 x 16)
// for 32 x 32 macro tile size we will need 4 x 8 x 32 blocks
const size_t macro_tile_col_size = micro_tile_col_size * wg_col_size;
const size_t macro_tile_row_size = micro_tile_row_size * wg_row_size;
const size_t reshape_factor = macro_tile_col_size / block_dim_0; // 64 / 16 = 4 need to fit 4 rows into one row in LDS; 32 / 16 = 2
const size_t unroll_factor = macro_tile_row_size / (block_dim_1 / reshape_factor); // 64 / (16 / 4) = 16; 32 / (16 / 2) = 4
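    // Illustrative walk-through of the numbers above (not part of the original kernel):
    // with a 64x64 macro tile and a 16x16 work group, reshape_factor = 4 and unroll_factor = 16.
    // In each pass of the loops below the 256 work items cover a strip 64 elements wide and
    // 4 rows tall (subblock_idx_0 = local_idx_0 + (local_idx_1 % 4) * 16 spans 0..63,
    // subblock_idx_1 = local_idx_1 / 4 + i * 4 picks the 4 rows), so 16 passes cover the
    // whole tile; swapping the two indices when writing into LDS performs the transpose.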
__shared__ T lds_real[macro_tile_row_size][macro_tile_col_size];
__shared__ T lds_imag[macro_tile_row_size][macro_tile_col_size];
size_t blocks_per_batch = grid_dim_0 / batch_size;
size_t batch_idx = block_idx_0 / blocks_per_batch;
input_matrix_real += batch_idx * input_leading_dim_size * input_row_size;
input_matrix_imag += batch_idx * input_leading_dim_size * input_row_size;
size_t input_offset = 0;
input_offset += input_leading_dim_size * block_idx_1 * macro_tile_row_size;
input_offset += (block_idx_0 % blocks_per_batch) * macro_tile_col_size;
input_matrix_real += input_offset;
input_matrix_imag += input_offset;
for(int i = 0; i < unroll_factor; i++)
{
//each iteration 256 work items will read from a 4 x 64 subblock
//there are 16 iterations
size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0; // local_idx_0 + (local_idx_1 % 4) * 16
size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);
//transpose happened here
lds_real[subblock_idx_0][subblock_idx_1] = input_matrix_real[subblock_idx_1 * input_leading_dim_size + subblock_idx_0];
lds_imag[subblock_idx_0][subblock_idx_1] = input_matrix_imag[subblock_idx_1 * input_leading_dim_size + subblock_idx_0];
}
__syncthreads();
output_matrix_real += batch_idx * input_col_size * output_leading_dim_size;
output_matrix_imag += batch_idx * input_col_size * output_leading_dim_size;
size_t output_offset = 0;
    output_offset += output_leading_dim_size * (block_idx_0 % blocks_per_batch) * macro_tile_row_size;//input_row_size == output_col_size
output_offset += block_idx_1 * macro_tile_col_size;
output_matrix_real += output_offset;
output_matrix_imag += output_offset;
for(int i = 0; i < unroll_factor; i++)
{
size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0;// 0-63
size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);// 0-3, 4-7 ... 60-63
output_matrix_real[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = lds_real[subblock_idx_1][subblock_idx_0];
output_matrix_imag[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = lds_imag[subblock_idx_1][subblock_idx_0];
}
}
|
3f04dcb56a5df37f5a249f1c9c0b41b3eeccb6ff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mLibCuda.h"
#include "GlobalDefines.h"
#include <math_constants.h>
#define THREADS_PER_BLOCK 8
#ifdef USE_LIE_SPACE
#include "Solver/LieDerivUtil.h"
#else
//! assumes z-y-x rotation composition (euler angles)
__device__ void matrixToPose(const float4x4& matrix, float3& rot, float3& trans)
{
trans = make_float3(matrix(0,3), matrix(1,3), matrix(2,3));
rot = make_float3(0.0f);
const float eps = 0.0001f;
float psi, theta, phi; // x,y,z axis angles
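	// For reference (derived from poseToMatrix below): with R = Rz(phi)*Ry(theta)*Rx(psi),
	//   R(2,0) = -sin(theta), R(2,1) = cos(theta)*sin(psi), R(2,2) = cos(theta)*cos(psi),
	//   R(1,0) = cos(theta)*sin(phi), R(0,0) = cos(theta)*cos(phi),
	// which is exactly what the asin/atan2 calls below invert (away from the gimbal-lock case).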
if (matrix(2, 0) > -1+eps && matrix(2, 0) < 1-eps) { // R(2, 0) != +/- 1
//if (abs(matrix(2, 0) - 1) > eps && abs(matrix(2, 0) + 1) > eps) { // R(2, 0) != +/- 1
theta = -asin(matrix(2, 0)); // \pi - theta
float costheta = cos(theta);
psi = atan2(matrix(2, 1) / costheta, matrix(2, 2) / costheta);
phi = atan2(matrix(1, 0) / costheta, matrix(0, 0) / costheta);
}
else {
phi = 0;
if (matrix(2, 0) <= -1 + eps) {
//if (abs(matrix(2, 0) + 1) < eps) { // R(2, 0) == -1
//if (abs(matrix(2, 0) + 1) > eps) {
theta = CUDART_PI_F / 2.0f;
psi = phi + atan2(matrix(0, 1), matrix(0, 2));
}
else { // R(2, 0) == 1
theta = -CUDART_PI_F / 2.0f;
psi = -phi + atan2(-matrix(0, 1), -matrix(0, 2));
}
}
rot = make_float3(psi, theta, phi);
}
//! assumes z-y-x rotation composition (euler angles)
__device__ void poseToMatrix(const float3& rot, const float3& trans, float4x4& matrix)
{
// rotation
const float CosAlpha = cos(rot.x); float CosBeta = cos(rot.y); float CosGamma = cos(rot.z);
const float SinAlpha = sin(rot.x); float SinBeta = sin(rot.y); float SinGamma = sin(rot.z);
matrix.m11 = CosGamma*CosBeta;
matrix.m12 = -SinGamma*CosAlpha+CosGamma*SinBeta*SinAlpha;
matrix.m13 = SinGamma*SinAlpha + CosGamma*SinBeta*CosAlpha;
matrix.m21 = SinGamma*CosBeta;
matrix.m22 = CosGamma*CosAlpha + SinGamma*SinBeta*SinAlpha;
matrix.m23 = -CosGamma*SinAlpha + SinGamma*SinBeta*CosAlpha;
matrix.m31 = -SinBeta;
matrix.m32 = CosBeta*SinAlpha;
matrix.m33 = CosBeta*CosAlpha;
// translation
matrix.m14 = trans.x;
matrix.m24 = trans.y;
matrix.m34 = trans.z;
matrix.m41 = 0.0f;
matrix.m42 = 0.0f;
matrix.m43 = 0.0f;
matrix.m44 = 1.0f;
}
#endif
__global__ void convertMatricesToPosesCU_Kernel(const float4x4* d_transforms, unsigned int numTransforms,
float3* d_rot, float3* d_trans, const int* d_validImages)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms && d_validImages[idx]) {
matrixToPose(d_transforms[idx], d_rot[idx], d_trans[idx]);
}
}
extern "C" void convertMatricesToPosesCU(const float4x4* d_transforms, unsigned int numTransforms,
float3* d_rot, float3* d_trans, const int* d_validImages)
{
const unsigned int N = numTransforms;
convertMatricesToPosesCU_Kernel << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_transforms, numTransforms, d_rot, d_trans, d_validImages);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void convertPosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numImages, float4x4* d_transforms, const int* d_validImages)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numImages && d_validImages[idx]) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
}
}
extern "C" void convertPosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numImages, float4x4* d_transforms, const int* d_validImages)
{
const unsigned int N = numImages;
convertPosesToMatricesCU_Kernel << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_rot, d_trans, numImages, d_transforms, d_validImages);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
|
3f04dcb56a5df37f5a249f1c9c0b41b3eeccb6ff.cu
|
#include "mLibCuda.h"
#include "GlobalDefines.h"
#include <math_constants.h>
#define THREADS_PER_BLOCK 8
#ifdef USE_LIE_SPACE
#include "Solver/LieDerivUtil.h"
#else
//! assumes z-y-x rotation composition (euler angles)
__device__ void matrixToPose(const float4x4& matrix, float3& rot, float3& trans)
{
trans = make_float3(matrix(0,3), matrix(1,3), matrix(2,3));
rot = make_float3(0.0f);
const float eps = 0.0001f;
float psi, theta, phi; // x,y,z axis angles
if (matrix(2, 0) > -1+eps && matrix(2, 0) < 1-eps) { // R(2, 0) != +/- 1
//if (abs(matrix(2, 0) - 1) > eps && abs(matrix(2, 0) + 1) > eps) { // R(2, 0) != +/- 1
theta = -asin(matrix(2, 0)); // \pi - theta
float costheta = cos(theta);
psi = atan2(matrix(2, 1) / costheta, matrix(2, 2) / costheta);
phi = atan2(matrix(1, 0) / costheta, matrix(0, 0) / costheta);
}
else {
phi = 0;
if (matrix(2, 0) <= -1 + eps) {
//if (abs(matrix(2, 0) + 1) < eps) { // R(2, 0) == -1
//if (abs(matrix(2, 0) + 1) > eps) {
theta = CUDART_PI_F / 2.0f;
psi = phi + atan2(matrix(0, 1), matrix(0, 2));
}
else { // R(2, 0) == 1
theta = -CUDART_PI_F / 2.0f;
psi = -phi + atan2(-matrix(0, 1), -matrix(0, 2));
}
}
rot = make_float3(psi, theta, phi);
}
//! assumes z-y-x rotation composition (euler angles)
__device__ void poseToMatrix(const float3& rot, const float3& trans, float4x4& matrix)
{
// rotation
const float CosAlpha = cos(rot.x); float CosBeta = cos(rot.y); float CosGamma = cos(rot.z);
const float SinAlpha = sin(rot.x); float SinBeta = sin(rot.y); float SinGamma = sin(rot.z);
matrix.m11 = CosGamma*CosBeta;
matrix.m12 = -SinGamma*CosAlpha+CosGamma*SinBeta*SinAlpha;
matrix.m13 = SinGamma*SinAlpha + CosGamma*SinBeta*CosAlpha;
matrix.m21 = SinGamma*CosBeta;
matrix.m22 = CosGamma*CosAlpha + SinGamma*SinBeta*SinAlpha;
matrix.m23 = -CosGamma*SinAlpha + SinGamma*SinBeta*CosAlpha;
matrix.m31 = -SinBeta;
matrix.m32 = CosBeta*SinAlpha;
matrix.m33 = CosBeta*CosAlpha;
// translation
matrix.m14 = trans.x;
matrix.m24 = trans.y;
matrix.m34 = trans.z;
matrix.m41 = 0.0f;
matrix.m42 = 0.0f;
matrix.m43 = 0.0f;
matrix.m44 = 1.0f;
}
#endif
__global__ void convertMatricesToPosesCU_Kernel(const float4x4* d_transforms, unsigned int numTransforms,
float3* d_rot, float3* d_trans, const int* d_validImages)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms && d_validImages[idx]) {
matrixToPose(d_transforms[idx], d_rot[idx], d_trans[idx]);
}
}
extern "C" void convertMatricesToPosesCU(const float4x4* d_transforms, unsigned int numTransforms,
float3* d_rot, float3* d_trans, const int* d_validImages)
{
const unsigned int N = numTransforms;
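	// integer ceiling division: enough THREADS_PER_BLOCK-sized blocks to cover all N transforms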
convertMatricesToPosesCU_Kernel << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_transforms, numTransforms, d_rot, d_trans, d_validImages);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void convertPosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numImages, float4x4* d_transforms, const int* d_validImages)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numImages && d_validImages[idx]) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
}
}
extern "C" void convertPosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numImages, float4x4* d_transforms, const int* d_validImages)
{
const unsigned int N = numImages;
convertPosesToMatricesCU_Kernel << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_rot, d_trans, numImages, d_transforms, d_validImages);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
|
7371d0d2cbd2ae4936aae7c27c6b219b77b544f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//headers
#include "../common/book.h"
#define N 10
int main(void)
{
//function declaration
__global__ void add(int *a, int *b, int *c);
//variable declaration
int i, a[N], b[N], c[N];
int *dev_a = NULL;
int *dev_b = NULL;
int *dev_c = NULL;
//allocate the memory on the GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, N * sizeof(int)));
//fill the arrays 'a' and 'b' on the CPU
for(i = 0; i < N; i++)
{
a[i] = -i;
b[i] = i * i;
}
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
//copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));
//display the results
for(i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
//free the memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
dev_a = NULL;
dev_b = NULL;
dev_c = NULL;
return (0);
}
__global__ void add(int *a, int *b, int *c)
{
//variable declaration
int tid = blockIdx.x; //handle the data at this index
//code
if(tid < N)
{
c[tid] = a[tid] + b[tid];
}
}
|
7371d0d2cbd2ae4936aae7c27c6b219b77b544f3.cu
|
//headers
#include "../common/book.h"
#define N 10
int main(void)
{
//function declaration
__global__ void add(int *a, int *b, int *c);
//variable declaration
int i, a[N], b[N], c[N];
int *dev_a = NULL;
int *dev_b = NULL;
int *dev_c = NULL;
//allocate the memory on the GPU
HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c, N * sizeof(int)));
//fill the arrays 'a' and 'b' on the CPU
for(i = 0; i < N; i++)
{
a[i] = -i;
b[i] = i * i;
}
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
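	//launch N blocks of 1 thread each; each block handles one element via blockIdx.x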
add<<<N, 1>>>(dev_a, dev_b, dev_c);
//copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
//display the results
for(i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
//free the memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
dev_a = NULL;
dev_b = NULL;
dev_c = NULL;
return (0);
}
__global__ void add(int *a, int *b, int *c)
{
//variable declaration
int tid = blockIdx.x; //handle the data at this index
//code
if(tid < N)
{
c[tid] = a[tid] + b[tid];
}
}
|
9b0cde763b3329a5c8b55d9f32bf6593031593b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TPZCudaCalls.h"
#include "pzreal.h"
#include "pzvec.h"
// #include "MatMulKernels.h"
#include "KernelsComputeSigma.h"
#include "KernelsMatMul.h"
#include "KernelsMatrixAssemble.h"
#define NT 64
TPZCudaCalls::TPZCudaCalls() {
cusparse_h = false;
cublas_h = false;
}
TPZCudaCalls::~TPZCudaCalls() {
if(cublas_h == true) {
hipblasDestroy(handle_cublas);
}
if(cusparse_h == true) {
hipsparseDestroy(handle_cusparse);
}
}
TPZCudaCalls &TPZCudaCalls::operator=(const TPZCudaCalls &copy) {
	if(&copy == this){
return *this;
}
handle_cusparse = copy.handle_cusparse;
cusparse_h = copy.cusparse_h;
handle_cublas = copy.handle_cublas;
cublas_h = copy.cublas_h;
return *this;
}
void TPZCudaCalls::Multiply(bool trans, int *m, int *n, int *k, REAL *A, int *strideA,
REAL *B, int *strideB, REAL *C, int *strideC, REAL alpha, int nmatrices) {
int numBlocks = (nmatrices + NT - 1) / NT;
hipLaunchKernelGGL(( MatrixMultiplicationKernel), dim3(numBlocks),dim3(NT), 0, 0, trans, m, n, k, A, strideA, B, strideB, C, strideC, alpha, nmatrices);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
std::string error_string = hipGetErrorString(error);
std::string error_message = "failed to perform MatrixMultiplicationKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::GatherOperation(int n, REAL *x, REAL *y, int *id) {
if(cusparse_h == false) {
cusparse_h = true;
hipsparseStatus_t result = hipsparseCreate(&handle_cusparse);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
hipsparseStatus_t result = hipsparseDgthr(handle_cusparse, n, x, y, id, HIPSPARSE_INDEX_BASE_ZERO);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform hipsparseDgthr");
}
}
void TPZCudaCalls::ScatterOperation(int n, REAL *x, REAL *y, int *id) {
if(cusparse_h == false) {
cusparse_h = true;
hipsparseStatus_t result = hipsparseCreate(&handle_cusparse);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
hipsparseStatus_t result = cusparseDsctr(handle_cusparse, n, x, id, y, HIPSPARSE_INDEX_BASE_ZERO);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform cusparseDsctr");
}
}
void TPZCudaCalls::DaxpyOperation(int n, double alpha, double *x, double *y) {
if(cublas_h == false) {
cublas_h = true;
hipblasStatus_t result = hipblasCreate(&handle_cublas);
if (result != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuBLAS");
}
}
hipblasStatus_t result = hipblasDaxpy(handle_cublas, n, &alpha, x, 1., y, 1.);
if (result != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform hipblasDaxpy");
}
}
void TPZCudaCalls::SpMV(int opt, int sym, int m, int k, int nnz, REAL alpha, REAL *csrVal, int *csrRowPtr, int *csrColInd, REAL *B, REAL *C) {
if(cusparse_h == false) {
cusparse_h = true;
hipsparseStatus_t result = hipsparseCreate(&handle_cusparse);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
hipsparseMatDescr_t descr;
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
if(sym == 0) {
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
}
else {
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
}
hipsparseOperation_t op;
if(opt == 0) {
op = HIPSPARSE_OPERATION_NON_TRANSPOSE;
} else {
op = HIPSPARSE_OPERATION_TRANSPOSE;
}
REAL beta = 0.;
hipsparseStatus_t result = hipsparseDcsrmv(handle_cusparse, op, m, k, nnz, &alpha, descr, csrVal, csrRowPtr, csrColInd, B, &beta, C);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform hipsparseDcsrmv");
}
}
void TPZCudaCalls::SpMSpM(int opt, int sym, int m, int n, int k, int nnzA, REAL *csrValA, int *csrRowPtrA, int *csrColIndA,
int nnzB, REAL *csrValB, int *csrRowPtrB, int *csrColIndB,
int nnzC, REAL *csrValC, int *csrRowPtrC) {
if(cusparse_h == false) {
cusparse_h = true;
hipsparseStatus_t result = hipsparseCreate(&handle_cusparse);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
hipsparseMatDescr_t descr;
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
if(sym == 0) {
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
}
else {
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
}
hipsparseOperation_t op;
if(opt == 0) {
op = HIPSPARSE_OPERATION_NON_TRANSPOSE;
} else {
op = HIPSPARSE_OPERATION_TRANSPOSE;
}
int *csrColIndC;
hipMalloc((void**)&csrColIndC, sizeof(int)*nnzC);
hipsparseStatus_t result = hipsparseDcsrgemm(handle_cusparse, op, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, k,
descr, nnzA, csrValA, csrRowPtrA, csrColIndA,
descr, nnzB, csrValB, csrRowPtrB, csrColIndB,
descr, csrValC, csrRowPtrC, csrColIndC);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform hipsparseDcsrgemm");
}
}
void TPZCudaCalls::ComputeSigma(bool update_mem, int npts, REAL *glob_delta_strain, REAL *glob_sigma, REAL lambda, REAL mu, REAL mc_phi, REAL mc_psi, REAL mc_cohesion, REAL *dPlasticStrain,
REAL *dMType, REAL *dAlpha, REAL *dSigma, REAL *dStrain, REAL *weight) {
int numBlocks = (npts + NT - 1) / NT;
hipLaunchKernelGGL(( ComputeSigmaKernel), dim3(numBlocks),dim3(NT), 0, 0, update_mem, npts, glob_delta_strain, glob_sigma, lambda, mu, mc_phi, mc_psi, mc_cohesion, dPlasticStrain, dMType, dAlpha, dSigma, dStrain, weight);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
std::string error_string = hipGetErrorString(error);
std::string error_message = "failed to perform ComputeSigmaKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::ComputeSigmaDep(bool update_mem, int npts, REAL *glob_delta_strain, REAL *glob_sigma, REAL *glob_dep, REAL lambda, REAL mu, REAL mc_phi, REAL mc_psi, REAL mc_cohesion, REAL *dPlasticStrain,
REAL *dMType, REAL *dAlpha, REAL *dSigma, REAL *dStrain, REAL *weight) {
int numBlocks = (npts + NT - 1) / NT;
hipLaunchKernelGGL(( ComputeSigmaDepKernel), dim3(numBlocks),dim3(NT), 0, 0, update_mem, npts, glob_delta_strain, glob_sigma, glob_dep, lambda, mu, mc_phi, mc_psi, mc_cohesion, dPlasticStrain, dMType, dAlpha, dSigma, dStrain, weight);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
std::string error_string = hipGetErrorString(error);
std::string error_message = "failed to perform ComputeSigmaDepKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::MatrixAssemble(REAL *Kc, REAL *dep, int nel, int *el_color_index,
REAL *storage, int *rowsizes, int *colsizes, int *rowfirstindex, int *colfirstindex, int *matrixposition, int *matrixstride) {
int numBlocks = (nel + NT_sm - 1) / NT_sm;
hipLaunchKernelGGL(( MatrixAssembleKernel), dim3(numBlocks),dim3(NT_sm), 0, 0, nel, Kc, dep, el_color_index, storage, rowsizes, colsizes, rowfirstindex, colfirstindex, matrixposition, matrixstride);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
std::string error_string = hipGetErrorString(error);
std::string error_message = "failed to perform MatrixAssembleKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::DeToDevice(REAL lambda, REAL mu) {
REAL De_host[] = {lambda + 2.0*mu, 0, lambda, 0, mu, 0, lambda, 0, lambda + 2.0*mu};
hipMemcpyToSymbol(De, &De_host, 9 * sizeof(REAL));
}
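// SolveCG below is a plain (unpreconditioned) conjugate gradient loop built from the
// hipSPARSE/hipBLAS calls. Sketch of one iteration, with r1 = r.r and r0 the previous r.r:
//   p  <- r + (r1/r0) * p      (Dscal + Daxpy; the first iteration just copies r into p)
//   Ax <- A * p                (csrmv)
//   a  <- r1 / (p . Ax)        (Ddot)
//   x  <- x + a * p,  r <- r - a * Ax
// repeated until r.r <= tol^2 or max_iter is reached.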
void TPZCudaCalls::SolveCG(int n, int nnzA, REAL *csrValA, int *csrRowPtrA, int *csrColIndA, REAL *r, REAL *x) {
if(cusparse_h == false) {
cusparse_h = true;
hipsparseStatus_t result = hipsparseCreate(&handle_cusparse);
if (result != HIPSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
if(cublas_h == false) {
cublas_h = true;
hipblasStatus_t result = hipblasCreate(&handle_cublas);
if (result != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuBLAS");
}
}
hipsparseMatDescr_t descr;
hipsparseCreateMatDescr(&descr);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
hipsparseSetMatFillMode(descr, HIPSPARSE_FILL_MODE_UPPER);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
REAL alpha = 1.0;
REAL alpham1 = -1.0;
REAL beta = 0.0;
REAL r0 = 0.;
REAL b;
REAL r1;
REAL dot;
REAL a;
REAL na;
REAL *d_Ax;
REAL *d_p;
hipMalloc((void **)&d_Ax, n*sizeof(REAL));
hipMalloc((void **)&d_p, n*sizeof(REAL));
hipsparseDcsrmv(handle_cusparse,HIPSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnzA, &alpha, descr, csrValA, csrRowPtrA, csrColIndA, x, &beta, d_Ax);
hipblasDaxpy(handle_cublas, n, &alpham1, d_Ax, 1, r, 1);
hipblasDdot(handle_cublas, n, r, 1, r, 1, &r1);
const REAL tol = 1.e-5;
const int max_iter = 10000;
int k;
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
hipblasDscal(handle_cublas, n, &b, d_p, 1);
hipblasDaxpy(handle_cublas, n, &alpha, r, 1, d_p, 1);
}
else
{
hipblasDcopy(handle_cublas, n, r, 1, d_p, 1);
}
hipsparseDcsrmv(handle_cusparse, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnzA, &alpha, descr, csrValA, csrRowPtrA, csrColIndA, d_p, &beta, d_Ax);
hipblasDdot(handle_cublas, n, d_p, 1, d_Ax, 1, &dot);
a = r1 / dot;
hipblasDaxpy(handle_cublas, n, &a, d_p, 1, x, 1);
na = -a;
hipblasDaxpy(handle_cublas, n, &na, d_Ax, 1, r, 1);
r0 = r1;
hipblasDdot(handle_cublas, n, r, 1, r, 1, &r1);
hipDeviceSynchronize();
k++;
}
hipFree(d_p);
hipFree(d_Ax);
}
|
9b0cde763b3329a5c8b55d9f32bf6593031593b4.cu
|
#include "TPZCudaCalls.h"
#include "pzreal.h"
#include "pzvec.h"
// #include "MatMulKernels.h"
#include "KernelsComputeSigma.h"
#include "KernelsMatMul.h"
#include "KernelsMatrixAssemble.h"
#define NT 64
TPZCudaCalls::TPZCudaCalls() {
cusparse_h = false;
cublas_h = false;
}
TPZCudaCalls::~TPZCudaCalls() {
if(cublas_h == true) {
cublasDestroy(handle_cublas);
}
if(cusparse_h == true) {
cusparseDestroy(handle_cusparse);
}
}
TPZCudaCalls &TPZCudaCalls::operator=(const TPZCudaCalls &copy) {
	if(&copy == this){
return *this;
}
handle_cusparse = copy.handle_cusparse;
cusparse_h = copy.cusparse_h;
handle_cublas = copy.handle_cublas;
cublas_h = copy.cublas_h;
return *this;
}
void TPZCudaCalls::Multiply(bool trans, int *m, int *n, int *k, REAL *A, int *strideA,
REAL *B, int *strideB, REAL *C, int *strideC, REAL alpha, int nmatrices) {
int numBlocks = (nmatrices + NT - 1) / NT;
MatrixMultiplicationKernel<<<numBlocks,NT>>> (trans, m, n, k, A, strideA, B, strideB, C, strideC, alpha, nmatrices);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
std::string error_string = cudaGetErrorString(error);
std::string error_message = "failed to perform MatrixMultiplicationKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::GatherOperation(int n, REAL *x, REAL *y, int *id) {
if(cusparse_h == false) {
cusparse_h = true;
cusparseStatus_t result = cusparseCreate(&handle_cusparse);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
cusparseStatus_t result = cusparseDgthr(handle_cusparse, n, x, y, id, CUSPARSE_INDEX_BASE_ZERO);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform cusparseDgthr");
}
}
void TPZCudaCalls::ScatterOperation(int n, REAL *x, REAL *y, int *id) {
if(cusparse_h == false) {
cusparse_h = true;
cusparseStatus_t result = cusparseCreate(&handle_cusparse);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
cusparseStatus_t result = cusparseDsctr(handle_cusparse, n, x, id, y, CUSPARSE_INDEX_BASE_ZERO);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform cusparseDsctr");
}
}
void TPZCudaCalls::DaxpyOperation(int n, double alpha, double *x, double *y) {
if(cublas_h == false) {
cublas_h = true;
cublasStatus_t result = cublasCreate(&handle_cublas);
if (result != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuBLAS");
}
}
cublasStatus_t result = cublasDaxpy(handle_cublas, n, &alpha, x, 1., y, 1.);
if (result != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform cublasDaxpy");
}
}
void TPZCudaCalls::SpMV(int opt, int sym, int m, int k, int nnz, REAL alpha, REAL *csrVal, int *csrRowPtr, int *csrColInd, REAL *B, REAL *C) {
if(cusparse_h == false) {
cusparse_h = true;
cusparseStatus_t result = cusparseCreate(&handle_cusparse);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
cusparseMatDescr_t descr;
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
if(sym == 0) {
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
}
else {
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC);
}
cusparseOperation_t op;
if(opt == 0) {
op = CUSPARSE_OPERATION_NON_TRANSPOSE;
} else {
op = CUSPARSE_OPERATION_TRANSPOSE;
}
REAL beta = 0.;
cusparseStatus_t result = cusparseDcsrmv(handle_cusparse, op, m, k, nnz, &alpha, descr, csrVal, csrRowPtr, csrColInd, B, &beta, C);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform cusparseDcsrmv");
}
}
void TPZCudaCalls::SpMSpM(int opt, int sym, int m, int n, int k, int nnzA, REAL *csrValA, int *csrRowPtrA, int *csrColIndA,
int nnzB, REAL *csrValB, int *csrRowPtrB, int *csrColIndB,
int nnzC, REAL *csrValC, int *csrRowPtrC) {
if(cusparse_h == false) {
cusparse_h = true;
cusparseStatus_t result = cusparseCreate(&handle_cusparse);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
cusparseMatDescr_t descr;
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
if(sym == 0) {
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
}
else {
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC);
}
cusparseOperation_t op;
if(opt == 0) {
op = CUSPARSE_OPERATION_NON_TRANSPOSE;
} else {
op = CUSPARSE_OPERATION_TRANSPOSE;
}
int *csrColIndC;
cudaMalloc((void**)&csrColIndC, sizeof(int)*nnzC);
cusparseStatus_t result = cusparseDcsrgemm(handle_cusparse, op, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k,
descr, nnzA, csrValA, csrRowPtrA, csrColIndA,
descr, nnzB, csrValB, csrRowPtrB, csrColIndB,
descr, csrValC, csrRowPtrC, csrColIndC);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to perform cusparseDcsrgemm");
}
}
void TPZCudaCalls::ComputeSigma(bool update_mem, int npts, REAL *glob_delta_strain, REAL *glob_sigma, REAL lambda, REAL mu, REAL mc_phi, REAL mc_psi, REAL mc_cohesion, REAL *dPlasticStrain,
REAL *dMType, REAL *dAlpha, REAL *dSigma, REAL *dStrain, REAL *weight) {
int numBlocks = (npts + NT - 1) / NT;
ComputeSigmaKernel<<<numBlocks,NT>>> (update_mem, npts, glob_delta_strain, glob_sigma, lambda, mu, mc_phi, mc_psi, mc_cohesion, dPlasticStrain, dMType, dAlpha, dSigma, dStrain, weight);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
std::string error_string = cudaGetErrorString(error);
std::string error_message = "failed to perform ComputeSigmaKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::ComputeSigmaDep(bool update_mem, int npts, REAL *glob_delta_strain, REAL *glob_sigma, REAL *glob_dep, REAL lambda, REAL mu, REAL mc_phi, REAL mc_psi, REAL mc_cohesion, REAL *dPlasticStrain,
REAL *dMType, REAL *dAlpha, REAL *dSigma, REAL *dStrain, REAL *weight) {
int numBlocks = (npts + NT - 1) / NT;
ComputeSigmaDepKernel<<<numBlocks,NT>>> (update_mem, npts, glob_delta_strain, glob_sigma, glob_dep, lambda, mu, mc_phi, mc_psi, mc_cohesion, dPlasticStrain, dMType, dAlpha, dSigma, dStrain, weight);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
std::string error_string = cudaGetErrorString(error);
std::string error_message = "failed to perform ComputeSigmaDepKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::MatrixAssemble(REAL *Kc, REAL *dep, int nel, int *el_color_index,
REAL *storage, int *rowsizes, int *colsizes, int *rowfirstindex, int *colfirstindex, int *matrixposition, int *matrixstride) {
int numBlocks = (nel + NT_sm - 1) / NT_sm;
MatrixAssembleKernel<<<numBlocks,NT_sm>>>(nel, Kc, dep, el_color_index, storage, rowsizes, colsizes, rowfirstindex, colfirstindex, matrixposition, matrixstride);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
std::string error_string = cudaGetErrorString(error);
std::string error_message = "failed to perform MatrixAssembleKernel: " + error_string;
throw std::runtime_error(error_message);
}
}
void TPZCudaCalls::DeToDevice(REAL lambda, REAL mu) {
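	// De_host holds a symmetric 3x3 elastic matrix (symmetric, so row/column order is immaterial):
	//   [ lambda+2*mu      0        lambda    ]
	//   [      0           mu          0      ]
	//   [   lambda         0     lambda+2*mu  ]
	// and is copied into the device symbol De below.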
REAL De_host[] = {lambda + 2.0*mu, 0, lambda, 0, mu, 0, lambda, 0, lambda + 2.0*mu};
cudaMemcpyToSymbol(De, &De_host, 9 * sizeof(REAL));
}
void TPZCudaCalls::SolveCG(int n, int nnzA, REAL *csrValA, int *csrRowPtrA, int *csrColIndA, REAL *r, REAL *x) {
if(cusparse_h == false) {
cusparse_h = true;
cusparseStatus_t result = cusparseCreate(&handle_cusparse);
if (result != CUSPARSE_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuSparse");
}
}
if(cublas_h == false) {
cublas_h = true;
cublasStatus_t result = cublasCreate(&handle_cublas);
if (result != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("failed to initialize cuBLAS");
}
}
cusparseMatDescr_t descr;
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC);
cusparseSetMatFillMode(descr, CUSPARSE_FILL_MODE_UPPER);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
REAL alpha = 1.0;
REAL alpham1 = -1.0;
REAL beta = 0.0;
REAL r0 = 0.;
REAL b;
REAL r1;
REAL dot;
REAL a;
REAL na;
REAL *d_Ax;
REAL *d_p;
cudaMalloc((void **)&d_Ax, n*sizeof(REAL));
cudaMalloc((void **)&d_p, n*sizeof(REAL));
cusparseDcsrmv(handle_cusparse,CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnzA, &alpha, descr, csrValA, csrRowPtrA, csrColIndA, x, &beta, d_Ax);
cublasDaxpy(handle_cublas, n, &alpham1, d_Ax, 1, r, 1);
cublasDdot(handle_cublas, n, r, 1, r, 1, &r1);
const REAL tol = 1.e-5;
const int max_iter = 10000;
int k;
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
cublasDscal(handle_cublas, n, &b, d_p, 1);
cublasDaxpy(handle_cublas, n, &alpha, r, 1, d_p, 1);
}
else
{
cublasDcopy(handle_cublas, n, r, 1, d_p, 1);
}
cusparseDcsrmv(handle_cusparse, CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnzA, &alpha, descr, csrValA, csrRowPtrA, csrColIndA, d_p, &beta, d_Ax);
cublasDdot(handle_cublas, n, d_p, 1, d_Ax, 1, &dot);
a = r1 / dot;
cublasDaxpy(handle_cublas, n, &a, d_p, 1, x, 1);
na = -a;
cublasDaxpy(handle_cublas, n, &na, d_Ax, 1, r, 1);
r0 = r1;
cublasDdot(handle_cublas, n, r, 1, r, 1, &r1);
cudaThreadSynchronize();
k++;
}
cudaFree(d_p);
cudaFree(d_Ax);
}
|
23641f93f426d4170d5d210c6b43fdc98e750124.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
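	// 0.00390625f == 1/256: scales the (presumably 8-bit) b/g/r values into [0,1).
	// Each pixel i occupies 6 consecutive floats in gc, after a fixed offset of 5120 * 6 floats.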
gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
// gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
// gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
// gc[5120 * 6 + i * 6 + 5] = float(a[i]);
}
|
23641f93f426d4170d5d210c6b43fdc98e750124.cu
|
#include "includes.h"
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
// gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
// gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
// gc[5120 * 6 + i * 6 + 5] = float(a[i]);
}
|
0a143a7dc4de1850a3f5aea0faededc8e6d1aa45.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>
using namespace std;
vector<string> splitpath( const string& str, const set<char> delimiters)
{
vector<string> result;
char const* pch = str.c_str();
char const* start = pch;
for(; *pch; ++pch)
{
if (delimiters.find(*pch) != delimiters.end())
{
if (start != pch)
{
string str(start, pch);
result.push_back(str);
}
else
{
result.push_back("");
}
start = pch + 1;
}
}
result.push_back(start);
return result;
}
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
cout << hipGetErrorString(error) << endl; \
} \
} while (0)
// Swap adjacent groups of x consecutive pixels within each channel, one thread per pixel
__global__ void kernel_swapArray(float *r_in, float *g_in, float *b_in,
float *r_result, float *g_result, float *b_result , int size, int x) {
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
if (Idx < size) {
r_result[Idx] = ((Idx/x)%2 == 0)? r_in[Idx +x]: r_in[Idx-x];
g_result[Idx] = ((Idx/x)%2 == 0)? g_in[Idx +x]: g_in[Idx-x];
b_result[Idx] = ((Idx/x)%2 == 0)? b_in[Idx +x]: b_in[Idx-x];
}
}
int main(int argc, char *argv[]){
string input_file_name;
if (argc > 1) {
input_file_name = argv[1];
}
ifstream infile;
cout << input_file_name.c_str() << endl;
infile.open(input_file_name.c_str());
int M,N, size;
float *r_in_host, *g_in_host, *b_in_host, *r_out_host, *g_out_host, *b_out_host;
float *r_in_dev, *g_in_dev, *b_in_dev, *r_out_dev, *g_out_dev, *b_out_dev;
infile >> M >> N;
cout << M << N << endl;
size = M*N;
// Allocating arrays
r_in_host = (float *)malloc(size * sizeof(float));
g_in_host = (float *)malloc(size * sizeof(float));
b_in_host = (float *)malloc(size * sizeof(float));
r_out_host = (float *)malloc(size * sizeof(float));
g_out_host = (float *)malloc(size * sizeof(float));
b_out_host = (float *)malloc(size * sizeof(float));
// Reading channels
for (int i = 0; i < size; i++)
{
infile >> r_in_host[i];
}
for (int i = 0; i < size; i++)
{
infile >> g_in_host[i];
}
for (int i = 0; i < size; i++)
{
infile >> b_in_host[i];
}
    // Prepare the file where the results will go
set<char> delims{'/'};
vector<string> path = splitpath(input_file_name, delims);
ofstream times_file, result_file;
times_file.open("resultados/times_cuda_pregunta2.txt", ios_base::app);
int x_to_test[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512};
for (int i = 0; i < 10; i++)
{
int X = x_to_test[i];
hipEvent_t ct1, ct2;
float dt;
// Input in device
CUDA_CHECK(hipMalloc((void**)&r_in_dev, size * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&g_in_dev, size * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_in_dev, size * sizeof(float)));
// Copy
CUDA_CHECK(hipMemcpy(r_in_dev, r_in_host, size * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(g_in_dev, g_in_host, size * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(b_in_dev, b_in_host, size * sizeof(float), hipMemcpyHostToDevice));
// Output in device
CUDA_CHECK(hipMalloc((void**)&r_out_dev, size * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&g_out_dev, size * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_out_dev, size * sizeof(float)));
hipEventCreate(&ct1);
hipEventCreate(&ct2);
hipEventRecord(ct1);
    // Call the algorithm (kernel launch)
int grid_size, block_size = 256;
grid_size = (int)ceil((float) size / block_size);
hipLaunchKernelGGL(( kernel_swapArray), dim3(grid_size), dim3(block_size), 0, 0, r_in_dev, g_in_dev, b_in_dev, r_out_dev, g_out_dev, b_out_dev, size, X);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
float duration;
duration = dt;
std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;
CUDA_CHECK(hipMemcpy(r_out_host, r_out_dev, size * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(g_out_host, g_out_dev, size * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(b_out_host, b_out_dev, size * sizeof(float), hipMemcpyDeviceToHost));
    // Write the result to the file
times_file << "X = "<< X << " " << path.back() << " " << duration << "[ms]" << endl;
// Printing the result file
string result_file_name = "resultados/result_cuda_pregunta3_x"+to_string(X)+"_"+path.back();
cout << result_file_name << endl;
result_file.open(result_file_name);
result_file << M << " " << N << endl;
for (int j = 0; j < size-1; j++)
{
result_file << r_out_host[j] << " ";
}
result_file << r_out_host[size-1] << endl;
for (int j = 0; j < size-1; j++)
{
result_file << g_out_host[j] << " ";
}
result_file << g_out_host[size-1] << endl;
for (int j = 0; j < size-1; j++)
{
result_file << b_out_host[j] << " ";
}
result_file << b_out_host[size-1];
result_file.close();
CUDA_CHECK(hipFree(r_in_dev));
CUDA_CHECK(hipFree(g_in_dev));
CUDA_CHECK(hipFree(b_in_dev));
CUDA_CHECK(hipFree(r_out_dev));
CUDA_CHECK(hipFree(g_out_dev));
CUDA_CHECK(hipFree(b_out_dev));
}
    // Free memory
free(r_in_host);
free(g_in_host);
free(b_in_host);
free(r_out_host);
free(g_out_host);
free(b_out_host);
times_file.close();
infile.close();
return 0;
}
|
0a143a7dc4de1850a3f5aea0faededc8e6d1aa45.cu
|
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>
using namespace std;
vector<string> splitpath( const string& str, const set<char> delimiters)
{
vector<string> result;
char const* pch = str.c_str();
char const* start = pch;
for(; *pch; ++pch)
{
if (delimiters.find(*pch) != delimiters.end())
{
if (start != pch)
{
string str(start, pch);
result.push_back(str);
}
else
{
result.push_back("");
}
start = pch + 1;
}
}
result.push_back(start);
return result;
}
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
cout << cudaGetErrorString(error) << endl; \
} \
} while (0)
// Swap adjacent groups of x consecutive pixels within each channel, one thread per pixel
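// Example (illustrative), x = 2: indices 0 and 1 fall in an even group and read from 2 and 3,
// while indices 2 and 3 fall in an odd group and read from 0 and 1, i.e. neighbouring blocks
// of x elements are exchanged. Note the reads at Idx+x / Idx-x are not bounds-checked here.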
__global__ void kernel_swapArray(float *r_in, float *g_in, float *b_in,
float *r_result, float *g_result, float *b_result , int size, int x) {
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
if (Idx < size) {
r_result[Idx] = ((Idx/x)%2 == 0)? r_in[Idx +x]: r_in[Idx-x];
g_result[Idx] = ((Idx/x)%2 == 0)? g_in[Idx +x]: g_in[Idx-x];
b_result[Idx] = ((Idx/x)%2 == 0)? b_in[Idx +x]: b_in[Idx-x];
}
}
int main(int argc, char *argv[]){
string input_file_name;
if (argc > 1) {
input_file_name = argv[1];
}
ifstream infile;
cout << input_file_name.c_str() << endl;
infile.open(input_file_name.c_str());
int M,N, size;
float *r_in_host, *g_in_host, *b_in_host, *r_out_host, *g_out_host, *b_out_host;
float *r_in_dev, *g_in_dev, *b_in_dev, *r_out_dev, *g_out_dev, *b_out_dev;
infile >> M >> N;
cout << M << N << endl;
size = M*N;
// Allocating arrays
r_in_host = (float *)malloc(size * sizeof(float));
g_in_host = (float *)malloc(size * sizeof(float));
b_in_host = (float *)malloc(size * sizeof(float));
r_out_host = (float *)malloc(size * sizeof(float));
g_out_host = (float *)malloc(size * sizeof(float));
b_out_host = (float *)malloc(size * sizeof(float));
// Reading channels
for (int i = 0; i < size; i++)
{
infile >> r_in_host[i];
}
for (int i = 0; i < size; i++)
{
infile >> g_in_host[i];
}
for (int i = 0; i < size; i++)
{
infile >> b_in_host[i];
}
    // Prepare the file where the results will go
set<char> delims{'/'};
vector<string> path = splitpath(input_file_name, delims);
ofstream times_file, result_file;
times_file.open("resultados/times_cuda_pregunta2.txt", ios_base::app);
int x_to_test[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512};
for (int i = 0; i < 10; i++)
{
int X = x_to_test[i];
cudaEvent_t ct1, ct2;
float dt;
// Input in device
CUDA_CHECK(cudaMalloc((void**)&r_in_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&g_in_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_in_dev, size * sizeof(float)));
// Copy
CUDA_CHECK(cudaMemcpy(r_in_dev, r_in_host, size * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(g_in_dev, g_in_host, size * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(b_in_dev, b_in_host, size * sizeof(float), cudaMemcpyHostToDevice));
// Output in device
CUDA_CHECK(cudaMalloc((void**)&r_out_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&g_out_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_out_dev, size * sizeof(float)));
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
    // Call the algorithm (kernel launch)
int grid_size, block_size = 256;
grid_size = (int)ceil((float) size / block_size);
kernel_swapArray<<<grid_size, block_size>>>(r_in_dev, g_in_dev, b_in_dev, r_out_dev, g_out_dev, b_out_dev, size, X);
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
float duration;
duration = dt;
std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;
CUDA_CHECK(cudaMemcpy(r_out_host, r_out_dev, size * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(g_out_host, g_out_dev, size * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(b_out_host, b_out_dev, size * sizeof(float), cudaMemcpyDeviceToHost));
    // Write the result to the file
times_file << "X = "<< X << " " << path.back() << " " << duration << "[ms]" << endl;
// Printing the result file
string result_file_name = "resultados/result_cuda_pregunta3_x"+to_string(X)+"_"+path.back();
cout << result_file_name << endl;
result_file.open(result_file_name);
result_file << M << " " << N << endl;
for (int j = 0; j < size-1; j++)
{
result_file << r_out_host[j] << " ";
}
result_file << r_out_host[size-1] << endl;
for (int j = 0; j < size-1; j++)
{
result_file << g_out_host[j] << " ";
}
result_file << g_out_host[size-1] << endl;
for (int j = 0; j < size-1; j++)
{
result_file << b_out_host[j] << " ";
}
result_file << b_out_host[size-1];
result_file.close();
CUDA_CHECK(cudaFree(r_in_dev));
CUDA_CHECK(cudaFree(g_in_dev));
CUDA_CHECK(cudaFree(b_in_dev));
CUDA_CHECK(cudaFree(r_out_dev));
CUDA_CHECK(cudaFree(g_out_dev));
CUDA_CHECK(cudaFree(b_out_dev));
}
    // Free memory
free(r_in_host);
free(g_in_host);
free(b_in_host);
free(r_out_host);
free(g_out_host);
free(b_out_host);
times_file.close();
infile.close();
return 0;
}
|
36600da8cb31df5e7a31446bb8dde9956e45031f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/random.h>
#include <thrust/inner_product.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include "Header.h"
#include "helper_cuda.h"
#include "timeutility.h"
//
//void preprocessOnCuda(std::ifstream& ifs)
//{
// OriEdgeList oel;
// read_edges(ifs, oel);
// int* dev_edges;
// int* dev_e_tmp;
// size_t edge_size = sizeof(int) * oel.edge_num * 2;
// checkCudaErrors(hipMalloc((void**)&dev_edges, edge_size));
// checkCudaErrors(hipMalloc((void**)&dev_e_tmp, edge_size));
// checkCudaErrors(hipMemcpy(dev_edges, oel.edges.data(), edge_size, hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(dev_e_tmp, dev_edges, edge_size, hipMemcpyDeviceToDevice));
// std::cout << "copied edges to dev" << std::endl;
//
// thrust::device_ptr<int> dptr_beg(dev_e_tmp);
// thrust::device_ptr<int> dptr_end(dev_e_tmp + oel.edge_num * 2);
// thrust::sort(dptr_beg, dptr_end);
//
// //int num = thrust::inner_product(dptr_beg, dptr_end - 1, dptr_beg + 1, 1, thrust::plus<int>(), thrust::not_equal_to<int>());
// //std::cout << "num of nodes" << num << std::endl;
//
// //annoying warning about sort
// //int* tmp;
// //checkCudaErrors(hipMalloc((void**)&tmp, sizeof(int) * 2));
// //thrust::sort(thrust::device_ptr<int>(tmp), thrust::device_ptr<int>(tmp) + 2);
//
//
// thrust::device_vector<int> d_keys(oel.node_num);
// thrust::device_vector<int> d_degree(oel.node_num);
// thrust::reduce_by_key(dptr_beg, dptr_end, thrust::constant_iterator<int>(1), d_keys.begin(), d_degree.begin());
// thrust::sort_by_key(d_degree.begin(), d_degree.end(), d_keys.begin(), thrust::greater<int>());
// checkCudaErrors(hipFree(dev_e_tmp));
// thrust::fill(d_degree.begin(), d_degree.end(), thrust::counting_iterator<int>(0));
// //thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple()))
//}
|
36600da8cb31df5e7a31446bb8dde9956e45031f.cu
|
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/random.h>
#include <thrust/inner_product.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include "Header.h"
#include "helper_cuda.h"
#include "timeutility.h"
//
//void preprocessOnCuda(std::ifstream& ifs)
//{
// OriEdgeList oel;
// read_edges(ifs, oel);
// int* dev_edges;
// int* dev_e_tmp;
// size_t edge_size = sizeof(int) * oel.edge_num * 2;
// checkCudaErrors(cudaMalloc((void**)&dev_edges, edge_size));
// checkCudaErrors(cudaMalloc((void**)&dev_e_tmp, edge_size));
// checkCudaErrors(cudaMemcpy(dev_edges, oel.edges.data(), edge_size, cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(dev_e_tmp, dev_edges, edge_size, cudaMemcpyDeviceToDevice));
// std::cout << "copied edges to dev" << std::endl;
//
// thrust::device_ptr<int> dptr_beg(dev_e_tmp);
// thrust::device_ptr<int> dptr_end(dev_e_tmp + oel.edge_num * 2);
// thrust::sort(dptr_beg, dptr_end);
//
// //int num = thrust::inner_product(dptr_beg, dptr_end - 1, dptr_beg + 1, 1, thrust::plus<int>(), thrust::not_equal_to<int>());
// //std::cout << "num of nodes" << num << std::endl;
//
// //annoying warning about sort
// //int* tmp;
// //checkCudaErrors(cudaMalloc((void**)&tmp, sizeof(int) * 2));
// //thrust::sort(thrust::device_ptr<int>(tmp), thrust::device_ptr<int>(tmp) + 2);
//
//
// thrust::device_vector<int> d_keys(oel.node_num);
// thrust::device_vector<int> d_degree(oel.node_num);
// thrust::reduce_by_key(dptr_beg, dptr_end, thrust::constant_iterator<int>(1), d_keys.begin(), d_degree.begin());
// thrust::sort_by_key(d_degree.begin(), d_degree.end(), d_keys.begin(), thrust::greater<int>());
// checkCudaErrors(cudaFree(dev_e_tmp));
// thrust::fill(d_degree.begin(), d_degree.end(), thrust::counting_iterator<int>(0));
// //thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple()))
//}
|
bab8870c82e5f2329ca26af9c03f45af42d8aead.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include <type_traits>
#include <stdint.h>
#include "fastertransformer/open_decoder.h"
#include "hipcub/hipcub.hpp"
#include "fastertransformer/utils/nvtx_utils.h"
#include "masked_multihead_attention.h"
namespace fastertransformer{
const int WARP_SIZE = 32;
const bool ATTENION_OPT = true;
const int ATTENTION_BLOCK_SIZE = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int HALF_ELEMENTS_PER_WARP_LOAD>
using Copy_half_t =
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4
>::type
>::type
>::type;
template <typename T, int ELEMENTS_PER_WARP_LOAD>
using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>;
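// Example (illustrative): for T = float and ELEMENTS_PER_WARP_LOAD = 64 this gives
// HALF_ELEMENTS_PER_WARP_LOAD = 128, so copy_t is int2 (8 bytes = 2 floats) and each of the
// 32 lanes of a warp loads its 2 elements of a 64-wide row in a single vectorized access.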
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
masked multi-head attention
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
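  // XOR-shuffle butterfly: masks 16, 8, 4, 2, 1 pairwise combine lanes, so after 5 steps
  // every lane of the warp holds the full 32-lane sum.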
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
  val = warpReduceMax(val); // get max in each warp
  if(lane == 0) // record in-warp max by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax(val);
return val;
}
template <int size_per_head, int block_sz, typename T>
__global__
void masked_attention_kernel_opt(
T* __restrict key_buf, T* __restrict value_buf,
T* __restrict query_buf, const T* __restrict self_Q_bias,
T* __restrict key_cache, const T* __restrict self_K_bias,
T* __restrict value_cache, const T* __restrict self_V_bias,
T* __restrict context_buf, const bool* finished,
int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // use to store the logits from [0~step]
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head;
int qkv_bias_id = head_id * size_per_head;
query_buf = &query_buf[qkv_id];
key_buf = &key_buf[qkv_id];
value_buf = &value_buf[qkv_id];
self_K_bias = &self_K_bias[qkv_bias_id];
key_cache = &key_cache[qkv_id];
self_Q_bias = &self_Q_bias[qkv_bias_id];
self_V_bias = &self_V_bias[qkv_bias_id];
value_cache = &value_cache[qkv_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * head_num * size_per_head;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
template <typename T>
__global__
void masked_attention_kernel(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
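  // Dynamic shared memory layout: sq[size_per_head] followed by logits[step]; the launcher
  // passes shared_size = sizeof(T) * (size_per_head + step) to match.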
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
//offset for each step
int offset = batch_size * head_num * size_per_head;
for(int ite = 0; ite < step; ++ite)
{
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1 && tid < size_per_head)
{
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads(); //try to remove
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
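// Sizing sketch for the fallback kernel above: its extern shared buffer packs the
// biased query sq[size_per_head] followed by logits[step], both of type T, so the
// dispatcher below launches it with
//   shared_size = sizeof(T) * (size_per_head + step);
// and one block per (batch, head) pair.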
template <typename T>
void masked_attention_dispatch(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, hipStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( masked_attention_kernel_opt<32, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
hipLaunchKernelGGL(( masked_attention_kernel_opt<64, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
hipLaunchKernelGGL(( masked_attention_kernel_opt<128, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
default:
// default path
int block_size = 128;
//suppose size_per_head <= 128
if(step <= 64)
block_size = 64;
else if(step <= 128 && step > size_per_head)
block_size = 128;
else if(step > 128 && step <= 256)
block_size = 256;
else if(step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if((int)block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
hipLaunchKernelGGL(( masked_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf, finished, max_batch_size,
head_num, size_per_head, step, scalar);
}
}
else {
assert(step > 0);
assert(size_per_head == 32 || size_per_head == 64 || size_per_head == 128);
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
params.q_bias = reinterpret_cast<const DataType *>(self_Q_bias);
params.k_bias = reinterpret_cast<const DataType *>(self_K_bias);
params.v_bias = reinterpret_cast<const DataType *>(self_V_bias);
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(query_buf);
params.k = reinterpret_cast<const DataType *>(key_buf);
params.v = reinterpret_cast<const DataType *>(value_buf);
params.stride = 0;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
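// Minimal host-side usage sketch for the dispatcher above (illustrative only; the
// d_* buffer names are hypothetical). A negative max_seq_len selects the kernels in
// this file, a non-negative value routes to masked_multihead_attention():
//
//   masked_attention_dispatch<float>(
//       d_key_buf, d_value_buf, d_query_buf, d_self_Q_bias,
//       d_key_cache, d_self_K_bias, d_value_cache, d_self_V_bias,
//       d_context_buf, /*finished=*/nullptr,
//       /*max_batch_size=*/batch, /*inference_batch_size=*/batch,
//       head_num, size_per_head, step, /*max_seq_len=*/-1, stream);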
template void masked_attention_dispatch(
float* key_buf,
float* value_buf,
float* query_buf,
const float* self_Q_bias,
float* key_cache,
const float* self_K_bias,
float* value_cache,
const float* self_V_bias,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
hipStream_t stream);
template void masked_attention_dispatch(
half* key_buf,
half* value_buf,
half* query_buf,
const half* self_Q_bias,
half* key_cache,
const half* self_K_bias,
half* value_cache,
const half* self_V_bias,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
hipStream_t stream);
template <int size_per_head, int block_sz, typename T>
__global__
void fusedQKV_masked_attention_kernel_opt(
const T* __restrict qkv_buf, const T* __restrict qkv_bias,
T* __restrict key_cache,
T* __restrict value_cache,
T* __restrict context_buf, const bool* finished, int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits from [0~step]
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
const int batch_id = bid / head_num;
const int hidden_units = head_num * size_per_head;
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = batch_id * 3 * hidden_units + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int cache_qkv_id = bid * size_per_head;
const T* query_buf = qkv_buf + qkv_id;
const T* key_buf = qkv_buf + hidden_units + qkv_id;
const T* value_buf = qkv_buf + 2 * hidden_units + qkv_id;
const T* self_Q_bias = qkv_bias + qkv_bias_id;
const T* self_K_bias = qkv_bias + hidden_units + qkv_bias_id;
const T* self_V_bias = qkv_bias + 2 * hidden_units + qkv_bias_id;
value_cache = value_cache + cache_qkv_id;
key_cache = key_cache + cache_qkv_id;
context_buf = context_buf + cache_qkv_id;
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * hidden_units;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different summation order in FP32
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
template <typename T>
void fusedQKV_masked_attention_dispatch(
const T* qkv_buf, const T* qkv_bias,
T* key_cache, T* value_cache,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, hipStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( fusedQKV_masked_attention_kernel_opt<32, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
qkv_buf, qkv_bias,
key_cache, value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
hipLaunchKernelGGL(( fusedQKV_masked_attention_kernel_opt<64, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
hipLaunchKernelGGL(( fusedQKV_masked_attention_kernel_opt<128, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
default:
assert(false);
}
}
else {
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
int hidden_units = head_num * size_per_head;
params.q_bias = reinterpret_cast<const DataType *>(qkv_bias);
params.k_bias = reinterpret_cast<const DataType *>(qkv_bias) + hidden_units;
params.v_bias = reinterpret_cast<const DataType *>(qkv_bias) + 2 * hidden_units;
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(qkv_buf);
params.k = reinterpret_cast<const DataType *>(qkv_buf) + hidden_units;
params.v = reinterpret_cast<const DataType *>(qkv_buf) + 2 * hidden_units;
params.stride = 3 * hidden_units;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
template void fusedQKV_masked_attention_dispatch(
const float* qkv_buf,
const float* qkv_bias,
float* key_cache,
float* value_cache,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
hipStream_t stream);
template void fusedQKV_masked_attention_dispatch(
const half* qkv_buf,
const half* qkv_bias,
half* key_cache,
half* value_cache,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
hipStream_t stream);
template <typename T>
void fusedQKV_masked_attention_kernelLauncher(
const T* qkv_buf,
const T* qkv_bias,
T* k_cache,
T* v_cache,
T* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
hipStream_t stream)
{
fusedQKV_masked_attention_dispatch(qkv_buf,
qkv_bias,
k_cache,
v_cache,
output,
nullptr,
batch_size,
batch_size,
head_num,
size_per_head,
seq_len,
max_seq_len,
stream);
}
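// Usage sketch for the launcher above (illustrative only; the d_* names are
// hypothetical): qkv_buf holds the fused projection laid out as
// [batch, 3, head_num, size_per_head] with qkv_bias as [3, head_num, size_per_head],
// and seq_len is forwarded as the current decoding step:
//
//   fusedQKV_masked_attention_kernelLauncher<half>(
//       d_qkv, d_qkv_bias, d_k_cache, d_v_cache, d_out,
//       batch, /*seq_len=*/step, head_num, size_per_head, max_seq_len, stream);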
template<typename T>
__global__ void transpose_4d(T* dst, T* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
// transpose from [dim0, dim1, dim2, dim3] to [dim2, X, dim1, dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % dim3;
index = (index - d3) / dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst[d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3 + d1 * dim3 + d3] = src[i];
}
}
template<>
__global__ void transpose_4d(half* dst, half* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
half2 *dst_ptr = (half2 *) dst;
half2 *src_ptr = (half2 *) src;
const int half_dim3 = dim3 / 2;
// transpose from [dim0, dim1, dim2, half_dim3] to [dim2, X, dim1, half_dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * half_dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % half_dim3;
index = (index - d3) / half_dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst_ptr[d2 * dim0_leading_dim * dim1 * half_dim3 + (d0 + dim0 * ite) * dim1 * half_dim3 + d1 * half_dim3 + d3] = src_ptr[i];
}
}
template<typename T>
void transpose_4d_kernelLauncher(T* dst, T* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream)
{
hipLaunchKernelGGL(( transpose_4d), dim3(local_batch_size * seq_len * local_hidden_units / 512), dim3(512 / (4 / (sizeof(T)))), 0, stream,
dst, src,
local_batch_size, local_head_num,
seq_len, size_per_head, batch_size, ite);
}
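// Concrete launch arithmetic for the launcher above, assuming fp16 with
// local_batch_size = 8, seq_len = 32, local_hidden_units = 768:
//   grid  = 8 * 32 * 768 / 512        = 384 blocks
//   block = 512 / (4 / sizeof(half))  = 256 threads (each moves one half2)
// For fp32 the divisor is 1, so the block keeps 512 threads and each thread moves
// one float per grid-stride iteration.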
template void transpose_4d_kernelLauncher(
float* dst,
float* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream);
template void transpose_4d_kernelLauncher(
half* dst,
half* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream);
#define NEW_TRANSPOSE_BATCH_MAJOR 1
template<typename T>
__global__ void transpose_4d_batch_major_k_cache(T* k_dst, const T* k_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int size_per_head_div_x = size_per_head / X_ELEMS;
if (out_idx >= head_num * size_per_head_div_x * max_seq_len) return;
int idx = out_idx;
const int k_seq_len_id = idx % max_seq_len;
idx = (idx - k_seq_len_id) / max_seq_len;
const int k_head_size_id = idx % size_per_head_div_x;
if (k_seq_len_id < seq_len)
key_dst[out_idx] = key_src[k_seq_len_id * size_per_head_div_x + k_head_size_id];
}
template<typename T>
__global__ void transpose_4d_batch_major_v_cache(T* v_dst, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
// 16 byte loads will handle "x" dimension
auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
// idx is over output dimension L * size_per_head / x for values
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_div_x = size_per_head / X_ELEMS;
if (idx >= size_per_head_div_x * seq_len) return;
val_dst[idx] = val_src[idx];
}
template<typename T>
__global__ void transpose_4d_batch_major(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int hidden_dim = head_num * size_per_head;
const int x = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_split = size_per_head / x;
const int batch_id = blockIdx.x;
const int seq_id = blockIdx.y;
for(int id = threadIdx.x; id < head_num * size_per_head_split * x; id += blockDim.x)
{
int tmp_id = id;
int x_id = tmp_id % x;
tmp_id = (tmp_id - x_id) / x;
int size_id = tmp_id % size_per_head_split;
tmp_id = (tmp_id - size_id) / size_per_head_split;
int head_id = tmp_id % head_num;
// key: [B, head_num, L, size_per_head / x, x] -> [B, head_num, size_per_head / x, max_L, x]
k_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + size_id * max_seq_len * x + seq_id * x + x_id] =
k_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
// value: [B, head_num, L, size_per_head / x, x] -> [B, head_num, max_L, size_per_head / x, x] (same element order, padded to max_seq_len)
v_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + seq_id * size_per_head + size_id * x + x_id] =
v_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
}
}
template<typename T>
void transpose_4d_batch_major_kernelLauncher(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream)
{
constexpr int block_sz = 128;
#if NEW_TRANSPOSE_BATCH_MAJOR == 1
constexpr int x = (sizeof(T) == 4)? 4 : 8;
int size = max_seq_len * size_per_head / x;
dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num);
dim3 grid_v((seq_len * size_per_head / x + block_sz - 1) / block_sz, local_batch_size, local_head_num);
hipLaunchKernelGGL(( transpose_4d_batch_major_k_cache), dim3(grid), dim3(block_sz), 0, stream,
k_dst, k_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
hipLaunchKernelGGL(( transpose_4d_batch_major_v_cache), dim3(grid_v), dim3(block_sz), 0, stream,
v_dst, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#else
dim3 grid(local_batch_size, seq_len);
hipLaunchKernelGGL(( transpose_4d_batch_major), dim3(grid), dim3(block_sz), 0, stream,
k_dst, v_dst,
k_src, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#endif
}
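// Cache layouts produced by the launcher above, with x = 4 (fp32) or 8 (fp16)
// elements per 16-byte load:
//   K: [B, H, L, Dh]  ->  [B, H, Dh/x, max_L, x]   // timesteps contiguous within each x-chunk
//   V: [B, H, L, Dh]  ->  [B, H, max_L, Dh]        // element order kept, padded to max_seq_len
// Both the vectorized path and the NEW_TRANSPOSE_BATCH_MAJOR == 0 fallback perform
// the same mapping.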
template void transpose_4d_batch_major_kernelLauncher(float* k_dst, float* v_dst,
const float* k_src, const float* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream);
template void transpose_4d_batch_major_kernelLauncher(half* k_dst, half* v_dst,
const half* k_src, const half* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream);
template<typename T>
__global__
void add_QKV_bias_generalized_2(const T* __restrict QKV,
const T* __restrict bias,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int word_per_block)
{
// QKV: [batch x sequence length, hidden * 3]
const T* data_ptr;
T* buf_ptr;
int n = head_num * size_per_head;
const int blocks_per_word = n / blockDim.x;
const int blocks_per_buffer = gridDim.x / 3;
const int qkv_id = blockIdx.x / blocks_per_buffer;
const int block_id_in_buffer = blockIdx.x % blocks_per_buffer;
const int offset = block_id_in_buffer * blockDim.x + threadIdx.x;
const int bias_id = offset % n;
T* buf_ptrs[3] = {q_buf_, k_buf_, v_buf_};
const int bid = blockIdx.x;
for(int index = threadIdx.x; index < n; index += blockDim.x)
{
buf_ptrs[index / n][bid * n + index % n] = QKV[bid * 3 * n + index] + __ldg(&bias[index]);
}
}
template <typename T, int size_per_head, int block_sz>
__global__
void cross_attention_kernel_opt(
T* __restrict query_buf, const T* __restrict Q_bias,
T* __restrict key_cache, const T* __restrict K_bias,
T* __restrict value_cache, const T* __restrict V_bias,
const int* length_per_sample, T* __restrict context_buf,
const bool* finished,
int batch_size, int head_num, const int step, const int seq_len, const float scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
float x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits from [0~step]
const int warp_id = threadIdx.x / WARP_SIZE;
const int warp_num = block_sz / WARP_SIZE;
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
const int tid = threadIdx.x;
const int bid = blockIdx.x / head_num;
const int head_id = blockIdx.x % head_num;
int length = __ldg(&length_per_sample[bid]);
const int lane_id = tid % WARP_SIZE;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int key_value_id = bid * (seq_len * head_num * size_per_head) +
+ head_id * size_per_head;
query_buf = &query_buf[qkv_id];
K_bias = &K_bias[qkv_bias_id];
key_cache = &key_cache[key_value_id];
Q_bias = &Q_bias[qkv_bias_id];
V_bias = &V_bias[qkv_bias_id];
value_cache = &value_cache[key_value_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, key_val_r, query_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
bias_r.v = *((copy_t *)Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = head_num * size_per_head;
bias_r.v = *((copy_t *) K_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only needs to be updated at the first step.
if (step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < length; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different summation order in FP32
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) V_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//For the first step, we should add bias to the value memory cache.
if(step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)key_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (threadIdx.x < WARP_SIZE)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = sum_r[i];
}
if (threadIdx.x < WARP_SIZE)
{
*((copy_t *)context_buf + lane_id) = key_val_r.v;
}
}
template<typename T>
__global__
void cross_attention_kernel(
T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias,
T* value_cache, const T* V_bias,
const int* length_per_sample, T* context_buf,
const bool* finished,
int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int length = __ldg(&length_per_sample[bid]);
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id];
__syncthreads();
for(int ite = 0; ite < length; ++ite)
{
int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head)
+ head_id * size_per_head + tid;
T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only needs to be updated at the first step.
if(step == 1 && tid < size_per_head)
{
key += K_bias[head_id * size_per_head + tid];
key_cache[key_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < length ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < length ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < length)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < length; ++ite)
{
int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head
+ head_id * size_per_head + tid;
T value = value_cache[value_id];
//for the first step, we should add bias to the value memory cache
if(step == 1)
{
value += V_bias[head_id * size_per_head + tid];
value_cache[value_id] = value;
}
sum += value * logits[ite];
}
context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum;
}
}
template <typename T>
void cross_attention_dispatch(T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, int step, int seq_len, hipStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
float scalar = 1.f / sqrtf(size_per_head * 1.0f);
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 32, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 64:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 64, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 128:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 128, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
default:
// default path
int block_size = 128;
if(seq_len <= 64)
block_size = 64;
else if(seq_len <= 128 && seq_len > size_per_head)
block_size = 128;
else if(seq_len > 128 && seq_len <= 256)
block_size = 256;
else if(seq_len > 256 && seq_len <= 512)
block_size = 512;
else
block_size = 1024;
if(block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
int shared_size = sizeof(T) * (size_per_head + seq_len);
hipLaunchKernelGGL(( cross_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
query_buf, Q_bias,
key_cache, K_bias,
value_cache, V_bias,
length, context_buf, finished,
batch_size,
head_num, size_per_head, step, seq_len, scalar);
}
}
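// Usage sketch for the cross-attention dispatcher above (illustrative; d_* names
// are hypothetical). key_cache / value_cache hold the encoder memory as
// [batch, mem_seq_len, head_num, size_per_head]; `length` gives the valid encoder
// length per sample, and the K/V biases are folded into the caches on step 1 only:
//
//   cross_attention_dispatch<float>(
//       d_query, d_Q_bias, d_mem_key_cache, d_K_bias, d_mem_value_cache, d_V_bias,
//       d_mem_lengths, d_context, /*finished=*/nullptr,
//       batch, head_num, size_per_head, step, mem_seq_len, stream);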
template void cross_attention_dispatch(
float* query_buf,
const float* Q_bias,
float* key_cache,
const float* K_bias,
float* value_cache,
const float* V_bias,
const int* length,
float* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
hipStream_t stream);
template void cross_attention_dispatch(
half* query_buf,
const half* Q_bias,
half* key_cache,
const half* K_bias,
half* value_cache,
const half* V_bias,
const int* length,
half* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
hipStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const float* qkv_buf,
const float* qkv_bias,
float* k_cache,
float* v_cache,
float* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
hipStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const half* qkv_buf,
const half* qkv_bias,
half* k_cache,
half* v_cache,
half* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
hipStream_t stream);
}//namespace fastertransformer
|
bab8870c82e5f2329ca26af9c03f45af42d8aead.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include <type_traits>
#include <stdint.h>
#include "fastertransformer/open_decoder.h"
#include "cub/cub.cuh"
#include "fastertransformer/utils/nvtx_utils.h"
#include "masked_multihead_attention.h"
namespace fastertransformer{
const int WARP_SIZE = 32;
const bool ATTENION_OPT = true;
const int ATTENTION_BLOCK_SIZE = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int HALF_ELEMENTS_PER_WARP_LOAD>
using Copy_half_t =
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4
>::type
>::type
>::type;
template <typename T, int ELEMENTS_PER_WARP_LOAD>
using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>;
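// Worked example of the vector widths Copy_t selects when one warp (32 lanes) loads
// a full head vector of ELEMENTS_PER_WARP_LOAD = size_per_head elements:
//   T = half,  size_per_head = 64  ->  64 half-equivalents -> int  (2 halves per lane)
//   T = float, size_per_head = 64  -> 128 half-equivalents -> int2 (2 floats per lane)
//   T = float, size_per_head = 128 -> 256 half-equivalents -> int4 (4 floats per lane)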
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
masked multi-head attention
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get max within each warp
if(lane == 0) // record each warp's max, indexed by warp id
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax(val);
return val;
}
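// The two block reductions above are combined by the fallback kernels below into
// the usual shared-memory softmax; a minimal sketch of that per-block pattern:
//
//   float m = blockReduceMax<float>(x);        // full result valid in thread 0
//   if (tid == 0) s_max = m;                   // broadcast through shared memory
//   __syncthreads();
//   float e = active ? __expf(x - s_max) : 0.f;
//   float s = blockReduceSum<float>(e);
//   if (tid == 0) s_sum = s + 1e-6f;           // epsilon avoids division by zero
//   __syncthreads();
//   prob = e / s_sum;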
template <int size_per_head, int block_sz, typename T>
__global__
void masked_attention_kernel_opt(
T* __restrict key_buf, T* __restrict value_buf,
T* __restrict query_buf, const T* __restrict self_Q_bias,
T* __restrict key_cache, const T* __restrict self_K_bias,
T* __restrict value_cache, const T* __restrict self_V_bias,
T* __restrict context_buf, const bool* finished,
int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits from [0~step]
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head;
int qkv_bias_id = head_id * size_per_head;
query_buf = &query_buf[qkv_id];
key_buf = &key_buf[qkv_id];
value_buf = &value_buf[qkv_id];
self_K_bias = &self_K_bias[qkv_bias_id];
key_cache = &key_cache[qkv_id];
self_Q_bias = &self_Q_bias[qkv_bias_id];
self_V_bias = &self_V_bias[qkv_bias_id];
value_cache = &value_cache[qkv_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * head_num * size_per_head;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different summation order in FP32
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
template <typename T>
__global__
void masked_attention_kernel(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
//offset for each step
int offset = batch_size * head_num * size_per_head;
for(int ite = 0; ite < step; ++ite)
{
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1 && tid < size_per_head)
{
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads(); //try to remove
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void masked_attention_dispatch(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, cudaStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
default:
// default path
int block_size = 128;
//suppose size_per_head <= 128
if(step <= 64)
block_size = 64;
else if(step <= 128 && step > size_per_head)
block_size = 128;
else if(step > 128 && step <= 256)
block_size = 256;
else if(step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if((int)block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
masked_attention_kernel<T><<<grid, block, shared_size, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf, finished, max_batch_size,
head_num, size_per_head, step, scalar);
}
}
else {
assert(step > 0);
assert(size_per_head == 32 || size_per_head == 64 || size_per_head == 128);
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
params.q_bias = reinterpret_cast<const DataType *>(self_Q_bias);
params.k_bias = reinterpret_cast<const DataType *>(self_K_bias);
params.v_bias = reinterpret_cast<const DataType *>(self_V_bias);
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(query_buf);
params.k = reinterpret_cast<const DataType *>(key_buf);
params.v = reinterpret_cast<const DataType *>(value_buf);
params.stride = 0;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
template void masked_attention_dispatch(
float* key_buf,
float* value_buf,
float* query_buf,
const float* self_Q_bias,
float* key_cache,
const float* self_K_bias,
float* value_cache,
const float* self_V_bias,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
cudaStream_t stream);
template void masked_attention_dispatch(
half* key_buf,
half* value_buf,
half* query_buf,
const half* self_Q_bias,
half* key_cache,
const half* self_K_bias,
half* value_cache,
const half* self_V_bias,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
cudaStream_t stream);
template <int size_per_head, int block_sz, typename T>
__global__
void fusedQKV_masked_attention_kernel_opt(
const T* __restrict qkv_buf, const T* __restrict qkv_bias,
T* __restrict key_cache,
T* __restrict value_cache,
T* __restrict context_buf, const bool* finished, int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits from [0~step]
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
const int batch_id = bid / head_num;
const int hidden_units = head_num * size_per_head;
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = batch_id * 3 * hidden_units + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int cache_qkv_id = bid * size_per_head;
const T* query_buf = qkv_buf + qkv_id;
const T* key_buf = qkv_buf + hidden_units + qkv_id;
const T* value_buf = qkv_buf + 2 * hidden_units + qkv_id;
const T* self_Q_bias = qkv_bias + qkv_bias_id;
const T* self_K_bias = qkv_bias + hidden_units + qkv_bias_id;
const T* self_V_bias = qkv_bias + 2 * hidden_units + qkv_bias_id;
value_cache = value_cache + cache_qkv_id;
key_cache = key_cache + cache_qkv_id;
context_buf = context_buf + cache_qkv_id;
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * hidden_units;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different summation order in FP32
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
template <typename T>
void fusedQKV_masked_attention_dispatch(
const T* qkv_buf, const T* qkv_bias,
T* key_cache, T* value_cache,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, cudaStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
fusedQKV_masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
qkv_buf, qkv_bias,
key_cache, value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
fusedQKV_masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
fusedQKV_masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
default:
assert(false);
}
}
else {
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
int hidden_units = head_num * size_per_head;
params.q_bias = reinterpret_cast<const DataType *>(qkv_bias);
params.k_bias = reinterpret_cast<const DataType *>(qkv_bias) + hidden_units;
params.v_bias = reinterpret_cast<const DataType *>(qkv_bias) + 2 * hidden_units;
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(qkv_buf);
params.k = reinterpret_cast<const DataType *>(qkv_buf) + hidden_units;
params.v = reinterpret_cast<const DataType *>(qkv_buf) + 2 * hidden_units;
params.stride = 3 * hidden_units;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
template void fusedQKV_masked_attention_dispatch(
const float* qkv_buf,
const float* qkv_bias,
float* key_cache,
float* value_cache,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
cudaStream_t stream);
template void fusedQKV_masked_attention_dispatch(
const half* qkv_buf,
const half* qkv_bias,
half* key_cache,
half* value_cache,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
cudaStream_t stream);
template <typename T>
void fusedQKV_masked_attention_kernelLauncher(
const T* qkv_buf,
const T* qkv_bias,
T* k_cache,
T* v_cache,
T* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
cudaStream_t stream)
{
fusedQKV_masked_attention_dispatch(qkv_buf,
qkv_bias,
k_cache,
v_cache,
output,
nullptr,
batch_size,
batch_size,
head_num,
size_per_head,
seq_len,
max_seq_len,
stream);
}
template<typename T>
__global__ void transpose_4d(T* dst, T* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
// transpose from [dim0, dim1, dim2, dim3] to [dim2, X, dim1, dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % dim3;
index = (index - d3) / dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst[d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3 + d1 * dim3 + d3] = src[i];
}
}
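// Worked index example for the mapping above (the sizes are assumed for
// illustration, not taken from a real call): with dim0=2, dim1=3, dim2=4, dim3=5,
// dim0_leading_dim=6 and ite=1, source element i=37 decomposes into
// (d0,d1,d2,d3)=(0,1,3,2) and is written to destination coordinate
// (d2=3, X=d0+dim0*ite=2, d1=1, d3=2), i.e. linear offset 307 in the
// [dim2, dim0_leading_dim, dim1, dim3] output.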
template<>
__global__ void transpose_4d(half* dst, half* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
half2 *dst_ptr = (half2 *) dst;
half2 *src_ptr = (half2 *) src;
const int half_dim3 = dim3 / 2;
// transpose from [dim0, dim1, dim2, half_dim3] to [dim2, X, dim1, half_dim3]
// where X has extent dim0_leading_dim and the write offset along X is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * half_dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % half_dim3;
index = (index - d3) / half_dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst_ptr[d2 * dim0_leading_dim * dim1 * half_dim3 + (d0 + dim0 * ite) * dim1 * half_dim3 + d1 * half_dim3 + d3] = src_ptr[i];
}
}
template<typename T>
void transpose_4d_kernelLauncher(T* dst, T* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream)
{
transpose_4d<<<local_batch_size * seq_len * local_hidden_units / 512, 512 / (4 / (sizeof(T))), 0, stream>>>(
dst, src,
local_batch_size, local_head_num,
seq_len, size_per_head, batch_size, ite);
}
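// Launch-shape sketch (assuming local_hidden_units == local_head_num * size_per_head,
// which this launcher seems to rely on): the element count is
// local_batch_size * seq_len * local_hidden_units. For float the block has
// 512 / (4/4) = 512 threads (about one element per thread); for half it has
// 512 / (4/2) = 256 threads (one half2, i.e. two elements, per thread). The
// grid-stride loop in the kernel covers any remainder when the count is not a
// multiple of 512.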
template void transpose_4d_kernelLauncher(
float* dst,
float* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream);
template void transpose_4d_kernelLauncher(
half* dst,
half* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream);
#define NEW_TRANSPOSE_BATCH_MAJOR 1
template<typename T>
__global__ void transpose_4d_batch_major_k_cache(T* k_dst, const T* k_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int size_per_head_div_x = size_per_head / X_ELEMS;
if (out_idx >= head_num * size_per_head_div_x * max_seq_len) return;
int idx = out_idx;
const int k_seq_len_id = idx % max_seq_len;
idx = (idx - k_seq_len_id) / max_seq_len;
const int k_head_size_id = idx % size_per_head_div_x;
if (k_seq_len_id < seq_len)
key_dst[out_idx] = key_src[k_seq_len_id * size_per_head_div_x + k_head_size_id];
}
template<typename T>
__global__ void transpose_4d_batch_major_v_cache(T* v_dst, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
// 16 byte loads will handle "x" dimension
auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
// idx is over output dimension L * size_per_head / x for values
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_div_x = size_per_head / X_ELEMS;
if (idx >= size_per_head_div_x * seq_len) return;
val_dst[idx] = val_src[idx];
}
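// Vector-width sketch for the 16-byte copies above (the sizes are assumed for
// illustration): X_ELEMS is 4 floats or 8 halves per uint4. With float,
// size_per_head = 64 and seq_len = 384, each (batch, head) pair performs
// size_per_head/X_ELEMS * seq_len = 16 * 384 = 6144 uint4 loads/stores; the copy is
// contiguous because the [L, size_per_head] block is laid out identically in source
// and destination for one (batch, head).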
template<typename T>
__global__ void transpose_4d_batch_major(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int hidden_dim = head_num * size_per_head;
const int x = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_split = size_per_head / x;
const int batch_id = blockIdx.x;
const int seq_id = blockIdx.y;
for(int id = threadIdx.x; id < head_num * size_per_head_split * x; id += blockDim.x)
{
int tmp_id = id;
int x_id = tmp_id % x;
tmp_id = (tmp_id - x_id) / x;
int size_id = tmp_id % size_per_head_split;
tmp_id = (tmp_id - size_id) / size_per_head_split;
int head_id = tmp_id % head_num;
// key: [B, head_num, L, size_per_head/x, x] -> [B, head_num, size_per_head/x, max_L, x] (seq dim padded to max_seq_len)
k_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + size_id * max_seq_len * x + seq_id * x + x_id] =
k_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
// value: [B, head_num, L, size_per_head/x, x] -> [B, head_num, max_L, size_per_head/x, x] (same element order, seq dim padded to max_seq_len)
v_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + seq_id * size_per_head + size_id * x + x_id] =
v_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
}
}
template<typename T>
void transpose_4d_batch_major_kernelLauncher(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream)
{
constexpr int block_sz = 128;
#if NEW_TRANSPOSE_BATCH_MAJOR == 1
constexpr int x = (sizeof(T) == 4)? 4 : 8;
int size = max_seq_len * size_per_head / x;
dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num);
dim3 grid_v((seq_len * size_per_head / x + block_sz - 1) / block_sz, local_batch_size, local_head_num);
transpose_4d_batch_major_k_cache<<<grid, block_sz, 0, stream>>>(
k_dst, k_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
transpose_4d_batch_major_v_cache<<<grid_v, block_sz, 0, stream>>>(
v_dst, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#else
dim3 grid(local_batch_size, seq_len);
transpose_4d_batch_major<<<grid, block_sz, 0, stream>>>(
k_dst, v_dst,
k_src, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#endif
}
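// Grid sizing sketch for the NEW_TRANSPOSE_BATCH_MAJOR path (example sizes are
// assumptions): with float, size_per_head = 64 and max_seq_len = 1024, x = 4 and
// size = 1024 * 64 / 4 = 16384, so grid.x = (16384 + 127) / 128 = 128 blocks per
// (batch, head) pair for the K kernel; the V kernel sizes grid_v.x from seq_len
// instead of max_seq_len.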
template void transpose_4d_batch_major_kernelLauncher(float* k_dst, float* v_dst,
const float* k_src, const float* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream);
template void transpose_4d_batch_major_kernelLauncher(half* k_dst, half* v_dst,
const half* k_src, const half* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream);
template<typename T>
__global__
void add_QKV_bias_generalized_2(const T* __restrict QKV,
const T* __restrict bias,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int word_per_block)
{
// QKV: [batch x sequence length, hidden * 3]
const T* data_ptr;
T* buf_ptr;
int n = head_num * size_per_head;
const int blocks_per_word = n / blockDim.x;
const int blocks_per_buffer = gridDim.x / 3;
const int qkv_id = blockIdx.x / blocks_per_buffer;
const int block_id_in_buffer = blockIdx.x % blocks_per_buffer;
const int offset = block_id_in_buffer * blockDim.x + threadIdx.x;
const int bias_id = offset % n;
T* buf_ptrs[3] = {q_buf_, k_buf_, v_buf_};
const int bid = blockIdx.x;
for(int index = threadIdx.x; index < n; index += blockDim.x)
{
buf_ptrs[index / n][bid * n + index % n] = QKV[bid * 3 * n + index] + __ldg(&bias[index]);
}
}
template <typename T, int size_per_head, int block_sz>
__global__
void cross_attention_kernel_opt(
T* __restrict query_buf, const T* __restrict Q_bias,
T* __restrict key_cache, const T* __restrict K_bias,
T* __restrict value_cache, const T* __restrict V_bias,
const int* length_per_sample, T* __restrict context_buf,
const bool* finished,
int batch_size, int head_num, const int step, const int seq_len, const float scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
float x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // use to store the logits from [0~step]
const int warp_id = threadIdx.x / WARP_SIZE;
const int warp_num = block_sz / WARP_SIZE;
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
const int tid = threadIdx.x;
const int bid = blockIdx.x / head_num;
const int head_id = blockIdx.x % head_num;
int length = __ldg(&length_per_sample[bid]);
const int lane_id = tid % WARP_SIZE;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int key_value_id = bid * (seq_len * head_num * size_per_head) + head_id * size_per_head;
query_buf = &query_buf[qkv_id];
K_bias = &K_bias[qkv_bias_id];
key_cache = &key_cache[key_value_id];
Q_bias = &Q_bias[qkv_bias_id];
V_bias = &V_bias[qkv_bias_id];
value_cache = &value_cache[key_value_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, key_val_r, query_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
bias_r.v = *((copy_t *)Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = head_num * size_per_head;
bias_r.v = *((copy_t *) K_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//For the first step, we should add the bias to the key memory cache.
//The KV memory cache only needs to be updated at the first step.
if (step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < length; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different accumulation order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) V_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//For the first step, we should add the bias to the value memory cache.
if(step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)key_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (threadIdx.x < WARP_SIZE)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = sum_r[i];
}
if (threadIdx.x < WARP_SIZE)
{
*((copy_t *)context_buf + lane_id) = key_val_r.v;
}
}
template<typename T>
__global__
void cross_attention_kernel(
T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias,
T* value_cache, const T* V_bias,
const int* length_per_sample, T* context_buf,
const bool* finished,
int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int length = __ldg(&length_per_sample[bid]);
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id];
__syncthreads();
for(int ite = 0; ite < length; ++ite)
{
int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head)
+ head_id * size_per_head + tid;
T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f);
//For the first step, we should add the bias to the key memory cache.
//The KV memory cache only needs to be updated at the first step.
if(step == 1 && tid < size_per_head)
{
key += K_bias[head_id * size_per_head + tid];
key_cache[key_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < length ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < length ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < length)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < length; ++ite)
{
int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head
+ head_id * size_per_head + tid;
T value = value_cache[value_id];
//for the first step, we should add the bias to the value memory cache
if(step == 1)
{
value += V_bias[head_id * size_per_head + tid];
value_cache[value_id] = value;
}
sum += value * logits[ite];
}
context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum;
}
}
template <typename T>
void cross_attention_dispatch(T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, int step, int seq_len, cudaStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
float scalar = 1.f / sqrtf(size_per_head * 1.0f);
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
cross_attention_kernel_opt<T, 32, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 64:
cross_attention_kernel_opt<T, 64, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 128:
cross_attention_kernel_opt<T, 128, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
default:
// default path
int block_size = 128;
if(seq_len <= 64)
block_size = 64;
else if(seq_len <= 128 && seq_len > size_per_head)
block_size = 128;
else if(seq_len > 128 && seq_len <= 256)
block_size = 256;
else if(seq_len > 256 && seq_len <= 512)
block_size = 512;
else
block_size = 1024;
if(block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
int shared_size = sizeof(T) * (size_per_head + seq_len);
cross_attention_kernel<T><<<grid, block, shared_size, stream>>>(
query_buf, Q_bias,
key_cache, K_bias,
value_cache, V_bias,
length, context_buf, finished,
batch_size,
head_num, size_per_head, step, seq_len, scalar);
}
}
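// Dispatch sketch for the generic path (the numbers are illustrative): with
// ATTENION_OPT enabled, size_per_head = 96 and seq_len = 200, cond = 96 matches no
// specialized case, so the generic kernel runs with block_size = 256 (since
// 128 < seq_len <= 256 and 256 >= size_per_head) and
// shared_size = sizeof(T) * (96 + 200) bytes.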
template void cross_attention_dispatch(
float* query_buf,
const float* Q_bias,
float* key_cache,
const float* K_bias,
float* value_cache,
const float* V_bias,
const int* length,
float* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
cudaStream_t stream);
template void cross_attention_dispatch(
half* query_buf,
const half* Q_bias,
half* key_cache,
const half* K_bias,
half* value_cache,
const half* V_bias,
const int* length,
half* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
cudaStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const float* qkv_buf,
const float* qkv_bias,
float* k_cache,
float* v_cache,
float* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
cudaStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const half* qkv_buf,
const half* qkv_bias,
half* k_cache,
half* v_cache,
half* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
cudaStream_t stream);
}//namespace fastertransformer
|
3160611482f47afcbfc87ae34bbf7d5a5011d0de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tdcuda.cuh"
using namespace std;
int main(int argc, char** argv){
srand(time(NULL));
run_exercices();
return 0;
}
int run_exercices(){
float *v1, *v2, *s_cpu, *s_gpu, *d_v1, *d_v2, *d_s;
double start, end;
double gpu_time, cpu_time;
int N = VEC_SIZE/DIV_MAX;
v1 = (float*)malloc(N*sizeof(float));
v2 = (float*)malloc(N*sizeof(float));
s_gpu = (float*)malloc(N*sizeof(float));
s_cpu = (float*)malloc(N*sizeof(float));
hipMalloc(&d_v1, N*sizeof(float));
hipMalloc(&d_v2, N*sizeof(float));
hipMalloc(&d_s, N*sizeof(float));
for (int i = 0; i < N; i++) {
v1[i] = rand_float();
v2[i] = rand_float();
}
// Exercice 2
cout << "Exercice 2\n==============================" << endl;
start = omp_get_wtime();
for( int j = 0; j< MAX_ITER; j++){
exercice2_cpu(2.0f,v1,v2,s_cpu,VEC_SIZE/DIV_MAX);
}
end = omp_get_wtime();
cpu_time = (end-start)/(MAX_ITER);
std::cout << "Cpu Time: " << cpu_time << std::endl;
hipMemcpy(d_v1, v1, (VEC_SIZE/DIV_MAX)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_v2, v2, (VEC_SIZE/DIV_MAX)*sizeof(float), hipMemcpyHostToDevice);
start = omp_get_wtime();
for( int j = 0; j< MAX_ITER; j++){
exercice2_cuda_nomemcpy(d_v1,d_v2,d_s,32,VEC_SIZE/DIV_MAX);
}
end = omp_get_wtime();
hipMemcpy(s_gpu, d_s, (VEC_SIZE/DIV_MAX)*sizeof(float), hipMemcpyDeviceToHost);
gpu_time = (end-start)/(MAX_ITER);
std::cout << "Gpu Time (without memcpy): " << gpu_time << " Acceleration Ratio: " << cpu_time/gpu_time << std::endl;
start = omp_get_wtime();
for( int j = 0; j< MAX_ITER; j++){
exercice2_cuda_withmemcpy(d_v1,v1,d_v2,v2,d_s,s_gpu,32,VEC_SIZE/DIV_MAX);
}
end = omp_get_wtime();
gpu_time = (end-start)/(MAX_ITER);
std::cout << "Gpu Time: " << gpu_time << " Acceleration Ratio: " << cpu_time/gpu_time << std::endl << std::endl;
hipFree(d_v1);
hipFree(d_v2);
hipFree(d_s);
free(v1);
free(v2);
free(s_cpu);
free(s_gpu);
return 0;
}
void exercice2_cuda_withmemcpy(float* d_x, float* x, float* d_y, float* y, float* d_s, float* s, int k, int size){
hipMemcpy(d_x, x, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, size*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( saxpy_kernel), dim3((size+k)/k), dim3(k), 0, 0, size, 2.0f, d_x, d_y, d_s);
hipMemcpy(s, d_s, size*sizeof(float), hipMemcpyDeviceToHost);
}
void exercice2_cuda_nomemcpy(float* d_x, float* d_y, float* d_s, int k, int size){
hipLaunchKernelGGL(( saxpy_kernel), dim3((size+k)/k), dim3(k), 0, 0, size, 2.0f, d_x, d_y, d_s);
hipDeviceSynchronize();
}
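// Grid arithmetic sketch (the numbers below are only an example): with k = 32
// threads per block and size = 1000 elements, (size + k) / k = 1032 / 32 = 32
// blocks, i.e. 1024 threads; the `i < n` guard in saxpy_kernel discards the 24
// surplus threads. Rounding up this way can launch one extra block when size is
// already a multiple of k, which is harmless for the same reason.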
void exercice2_cpu(float a, float* x, float* y, float* s, int size){
#pragma omp parallel for num_threads(8)
for (int i=0; i<size; i++)
{
s[i] = a*x[i] + y[i];
}
}
__global__ void saxpy_kernel(int n, float a, float *v1, float *v2, float *s){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < n ) s[i] = a*v1[i] + v2[i];
}
__global__ void mean_kernel(int n, float* v1, float* v2, float* res){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if( i < n ) res[i] = (v1[i] + v2[i])/2;
}
float rand_float(){
return (float)((rand() % 360) - 180.0);
}
|
3160611482f47afcbfc87ae34bbf7d5a5011d0de.cu
|
#include "tdcuda.cuh"
using namespace std;
int main(int argc, char** argv){
srand(time(NULL));
run_exercices();
return 0;
}
int run_exercices(){
float *v1, *v2, *s_cpu, *s_gpu, *d_v1, *d_v2, *d_s;
double start, end;
double gpu_time, cpu_time;
int N = VEC_SIZE/DIV_MAX;
v1 = (float*)malloc(N*sizeof(float));
v2 = (float*)malloc(N*sizeof(float));
s_gpu = (float*)malloc(N*sizeof(float));
s_cpu = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_v1, N*sizeof(float));
cudaMalloc(&d_v2, N*sizeof(float));
cudaMalloc(&d_s, N*sizeof(float));
for (int i = 0; i < N; i++) {
v1[i] = rand_float();
v2[i] = rand_float();
}
// Exercice 2
cout << "Exercice 2\n==============================" << endl;
start = omp_get_wtime();
for( int j = 0; j< MAX_ITER; j++){
exercice2_cpu(2.0f,v1,v2,s_cpu,VEC_SIZE/DIV_MAX);
}
end = omp_get_wtime();
cpu_time = (end-start)/(MAX_ITER);
std::cout << "Cpu Time: " << cpu_time << std::endl;
cudaMemcpy(d_v1, v1, (VEC_SIZE/DIV_MAX)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_v2, v2, (VEC_SIZE/DIV_MAX)*sizeof(float), cudaMemcpyHostToDevice);
start = omp_get_wtime();
for( int j = 0; j< MAX_ITER; j++){
exercice2_cuda_nomemcpy(d_v1,d_v2,d_s,32,VEC_SIZE/DIV_MAX);
}
end = omp_get_wtime();
cudaMemcpy(s_gpu, d_s, (VEC_SIZE/DIV_MAX)*sizeof(float), cudaMemcpyDeviceToHost);
gpu_time = (end-start)/(MAX_ITER);
std::cout << "Gpu Time (without memcpy): " << gpu_time << " Acceleration Ratio: " << cpu_time/gpu_time << std::endl;
start = omp_get_wtime();
for( int j = 0; j< MAX_ITER; j++){
exercice2_cuda_withmemcpy(d_v1,v1,d_v2,v2,d_s,s_gpu,32,VEC_SIZE/DIV_MAX);
}
end = omp_get_wtime();
gpu_time = (end-start)/(MAX_ITER);
std::cout << "Gpu Time: " << gpu_time << " Acceleration Ratio: " << cpu_time/gpu_time << std::endl << std::endl;
cudaFree(d_v1);
cudaFree(d_v2);
cudaFree(d_s);
free(v1);
free(v2);
free(s_cpu);
free(s_gpu);
return 0;
}
void exercice2_cuda_withmemcpy(float* d_x, float* x, float* d_y, float* y, float* d_s, float* s, int k, int size){
cudaMemcpy(d_x, x, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, size*sizeof(float), cudaMemcpyHostToDevice);
saxpy_kernel<<<(size+k)/k, k>>>(size, 2.0f, d_x, d_y, d_s);
cudaMemcpy(s, d_s, size*sizeof(float), cudaMemcpyDeviceToHost);
}
void exercice2_cuda_nomemcpy(float* d_x, float* d_y, float* d_s, int k, int size){
saxpy_kernel<<<(size+k)/k, k>>>(size, 2.0f, d_x, d_y, d_s);
cudaDeviceSynchronize();
}
void exercice2_cpu(float a, float* x, float* y, float* s, int size){
#pragma omp parallel for num_threads(8)
for (int i=0; i<size; i++)
{
s[i] = a*x[i] + y[i];
}
}
__global__ void saxpy_kernel(int n, float a, float *v1, float *v2, float *s){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < n ) s[i] = a*v1[i] + v2[i];
}
__global__ void mean_kernel(int n, float* v1, float* v2, float* res){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if( i < n ) res[i] = (v1[i] + v2[i])/2;
}
float rand_float(){
return (float)((rand() % 360) - 180.0);
}
|
5ef6ae437f02d108f50dd9fb6d14fe328486a4b2.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Cryptohaze Multiforcer & Wordyforcer - low performance GPU password cracking
Copyright (C) 2011 Bitweasil (http://www.cryptohaze.com/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @section DESCRIPTION
*
* This file implements DoubleMD5 multihash cracking.
*/
#include <stdint.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include "CUDA_Common/cuPrintf.cu"
#include "MFN_CUDA_device/MFN_CUDA_incrementors.h"
#include "MFN_CUDA_device/MFN_CUDA_Common.h"
#include "MFN_CUDA_device/MFN_CUDA_MD5.h"
#if !defined(__HIPCC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#define __align__() /**/
#endif
/**
* The maximum password length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN 48
/**
* The maximum charset length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH 128
// Define the constant types used by the kernels here.
__device__ __constant__ __align__(16) uint8_t deviceCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN];
__device__ __constant__ __align__(16) uint8_t deviceReverseCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN];
__device__ __constant__ uint8_t charsetLengthsPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN];
__device__ __constant__ __align__(16) uint8_t constantBitmapAPlainDoubleMD5[8192];
/**
* Constant parameters go here instead of getting passed as kernel arguments.
* This allows for faster accesses (as they are cached, and all threads will
* be accessing the same element), and also reduces the shared memory usage,
* which may allow for better occupancy in the future. The kernels will load
* these as needed, and theoretically will not need registers for some of them,
* which will help reduce the register pressure on kernels. Hopefully.
*/
// Password length. Needed for some offset calculations.
__device__ __constant__ uint8_t passwordLengthPlainDoubleMD5;
// Number of hashes present in memory.
__device__ __constant__ uint64_t numberOfHashesPlainDoubleMD5;
// Address of the hashlist in global memory.
__device__ __constant__ uint8_t *deviceGlobalHashlistAddressPlainDoubleMD5;
// Addresses of the various global bitmaps.
__device__ __constant__ uint8_t *deviceGlobalBitmapAPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalBitmapBPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalBitmapCPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalBitmapDPlainDoubleMD5;
// Addresses of the arrays for found passwords & success flags
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordsPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordFlagsPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalStartPointsPlainDoubleMD5;
__device__ __constant__ uint32_t *deviceGlobalStartPasswords32PlainDoubleMD5;
__device__ __constant__ uint32_t deviceNumberStepsToRunPlainDoubleMD5;
__device__ __constant__ uint64_t deviceNumberThreadsPlainDoubleMD5;
// Defined if we are using the loadPasswords32/storePasswords32
#define USE_NEW_PASSWORD_LOADING 1
__constant__ char hexLookupValuesDoubleMD5[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
#define MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(pass_len) \
__global__ void MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_##pass_len () { \
uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d; \
uint32_t b0pass, b1pass, b2pass, b3pass; \
uint32_t password_count = 0, passOffset; \
__shared__ uint8_t __align__(16) sharedCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * pass_len]; \
__shared__ uint8_t __align__(16) sharedReverseCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * pass_len]; \
__shared__ uint8_t sharedCharsetLengthsPlainDoubleMD5[pass_len]; \
__shared__ uint8_t __align__(16) sharedBitmap[8192]; \
__shared__ uint8_t hashLookup[256][2]; \
if (threadIdx.x == 0) { \
uint64_t *sharedCharset64 = (uint64_t *)sharedCharsetPlainDoubleMD5; \
uint64_t *deviceCharset64 = (uint64_t *)deviceCharsetPlainDoubleMD5; \
uint64_t *sharedReverseCharset64 = (uint64_t *)sharedReverseCharsetPlainDoubleMD5; \
uint64_t *deviceReverseCharset64 = (uint64_t *)deviceReverseCharsetPlainDoubleMD5; \
uint64_t *constantBitmap64 = (uint64_t *)constantBitmapAPlainDoubleMD5; \
uint64_t *sharedBitmap64 = (uint64_t *)sharedBitmap; \
for (a = 0; a < ((MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * pass_len) / 8); a++) { \
sharedCharset64[a] = deviceCharset64[a]; \
sharedReverseCharset64[a] = deviceReverseCharset64[a]; \
} \
for (a = 0; a < pass_len; a++) { \
sharedCharsetLengthsPlainDoubleMD5[a] = charsetLengthsPlainDoubleMD5[a]; \
} \
for (a = 0; a < 8192 / 8; a++) { \
sharedBitmap64[a] = constantBitmap64[a]; \
} \
for (a = 0; a < 256; a++) { \
hashLookup[a][0] = hexLookupValuesDoubleMD5[a / 16]; \
hashLookup[a][1] = hexLookupValuesDoubleMD5[a % 16]; \
} \
} \
syncthreads(); \
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
b14 = pass_len * 8; \
if (USE_NEW_PASSWORD_LOADING) { \
loadPasswords32(deviceGlobalStartPasswords32PlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len); \
} else {\
if (charsetLengthsPlainDoubleMD5[1] == 0) { \
loadPasswordSingle(sharedCharsetPlainDoubleMD5, deviceGlobalStartPointsPlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len); \
} else { \
loadPasswordMultiple(sharedCharsetPlainDoubleMD5, deviceGlobalStartPointsPlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len, MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH); \
} \
ResetCharacterAtPosition(0x80, pass_len, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
} \
while (password_count < deviceNumberStepsToRunPlainDoubleMD5) { \
MD5_FULL_HASH(); \
b0pass = b0; b1pass = b1; b2pass = b2; b3pass = b3; \
LoadHash16AsLEString(hashLookup); \
b8 = 0x00000080; \
b14 = 32 * 8; \
MD5_FIRST_3_ROUNDS(); \
MD5II (a, b, c, d, b0, MD5S41, 0xf4292244); \
MD5II (d, a, b, c, b7, MD5S42, 0x432aff97); \
MD5II (c, d, a, b, b14, MD5S43, 0xab9423a7); \
MD5II (b, c, d, a, b5, MD5S44, 0xfc93a039); \
MD5II (a, b, c, d, b12, MD5S41, 0x655b59c3); \
MD5II (d, a, b, c, b3, MD5S42, 0x8f0ccc92); \
MD5II (c, d, a, b, b10, MD5S43, 0xffeff47d); \
MD5II (b, c, d, a, b1, MD5S44, 0x85845dd1); \
MD5II (a, b, c, d, b8, MD5S41, 0x6fa87e4f); \
MD5II (d, a, b, c, b15, MD5S42, 0xfe2ce6e0); \
MD5II (c, d, a, b, b6, MD5S43, 0xa3014314); \
MD5II (b, c, d, a, b13, MD5S44, 0x4e0811a1); \
MD5II (a, b, c, d, b4, MD5S41, 0xf7537e82); \
if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
if (!(deviceGlobalBitmapAPlainDoubleMD5) || ((deviceGlobalBitmapAPlainDoubleMD5[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
MD5II (d, a, b, c, b11, MD5S42, 0xbd3af235); \
if (!deviceGlobalBitmapDPlainDoubleMD5 || ((deviceGlobalBitmapDPlainDoubleMD5[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
MD5II (c, d, a, b, b2, MD5S43, 0x2ad7d2bb); \
if (!deviceGlobalBitmapCPlainDoubleMD5 || ((deviceGlobalBitmapCPlainDoubleMD5[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
MD5II (b, c, d, a, b9, MD5S44, 0xeb86d391); \
if (!deviceGlobalBitmapBPlainDoubleMD5 || ((deviceGlobalBitmapBPlainDoubleMD5[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
b0 = b0pass; b1 = b1pass; b2 = b2pass; b3 = b3pass; \
checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
deviceGlobalFoundPasswordsPlainDoubleMD5, deviceGlobalFoundPasswordFlagsPlainDoubleMD5, \
deviceGlobalHashlistAddressPlainDoubleMD5, numberOfHashesPlainDoubleMD5, \
passwordLengthPlainDoubleMD5); \
} } } } }\
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
b14 = pass_len * 8; \
b0 = b0pass; b1 = b1pass; b2 = b2pass; b3 = b3pass; \
if (charsetLengthsPlainDoubleMD5[1] == 0) { \
makeMFNSingleIncrementors##pass_len (sharedCharsetPlainDoubleMD5, sharedReverseCharsetPlainDoubleMD5, sharedCharsetLengthsPlainDoubleMD5); \
} else { \
makeMFNMultipleIncrementors##pass_len (sharedCharsetPlainDoubleMD5, sharedReverseCharsetPlainDoubleMD5, sharedCharsetLengthsPlainDoubleMD5); \
} \
password_count++; \
} \
if (USE_NEW_PASSWORD_LOADING) { \
storePasswords32(deviceGlobalStartPasswords32PlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len); \
} \
}
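// Hex-lookup sketch for the hashLookup table built inside the kernel (the byte
// value 0xAB is just an example): hashLookup[0xAB][0] = hexLookupValuesDoubleMD5[0xAB / 16]
// = hexLookupValuesDoubleMD5[10] = 'a' and hashLookup[0xAB][1] =
// hexLookupValuesDoubleMD5[0xAB % 16] = hexLookupValuesDoubleMD5[11] = 'b', so each
// digest byte expands to two lowercase hex characters, which matches the
// 32-character message (b14 = 32 * 8 bits) fed to the second MD5 pass.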
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(1);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(2);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(3);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(4);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(5);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(6);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(7);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(8);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(9);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(10);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(11);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(12);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(13);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(14);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(15);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(16);
extern "C" hipError_t MFNHashTypePlainCUDA_DoubleMD5_CopyValueToConstant(
const char *symbolName, void *hostDataAddress, size_t bytesToCopy) {
return hipMemcpyToSymbol(symbolName, hostDataAddress, bytesToCopy);
}
extern "C" hipError_t MFNHashTypePlainCUDA_DoubleMD5_LaunchKernel(uint32_t passwordLength, uint32_t Blocks, uint32_t Threads) {
//printf("MFNHashTypePlainCUDA_MD5_LaunchKernel()\n");
//cudaPrintfInit();
switch (passwordLength) {
case 1:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_1) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 2:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_2) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 3:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_3) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 4:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_4) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 5:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_5) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 6:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_6) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 7:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_7) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 8:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_8) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 9:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_9) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 10:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_10) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 11:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_11) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 12:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_12) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 13:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_13) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 14:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_14) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 15:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_15) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
case 16:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_16) , dim3(Blocks), dim3(Threads) , 0, 0, );
break;
default:
printf("Password length %d unsupported!\n", passwordLength);
exit(1);
break;
}
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
return hipGetLastError();
}
|
5ef6ae437f02d108f50dd9fb6d14fe328486a4b2.cu
|
/*
Cryptohaze Multiforcer & Wordyforcer - low performance GPU password cracking
Copyright (C) 2011 Bitweasil (http://www.cryptohaze.com/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @section DESCRIPTION
*
* This file implements DoubleMD5 multihash cracking.
*/
#include <stdint.h>
#include <stdio.h>
#include <cuda.h>
//#include "CUDA_Common/cuPrintf.cu"
#include "MFN_CUDA_device/MFN_CUDA_incrementors.h"
#include "MFN_CUDA_device/MFN_CUDA_Common.h"
#include "MFN_CUDA_device/MFN_CUDA_MD5.h"
#if !defined(__CUDACC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#define __align__() /**/
#endif
/**
* The maximum password length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN 48
/**
* The maximum charset length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH 128
// Define the constant types used by the kernels here.
__device__ __constant__ __align__(16) uint8_t deviceCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN];
__device__ __constant__ __align__(16) uint8_t deviceReverseCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN];
__device__ __constant__ uint8_t charsetLengthsPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_PASSLEN];
__device__ __constant__ __align__(16) uint8_t constantBitmapAPlainDoubleMD5[8192];
/**
* Constant parameters go here instead of getting passed as kernel arguments.
* This allows for faster accesses (as they are cached, and all threads will
* be accessing the same element), and also reduces the shared memory usage,
* which may allow for better occupancy in the future. The kernels will load
* these as needed, and theoretically will not need registers for some of them,
* which will help reduce the register pressure on kernels. Hopefully.
*/
// Password length. Needed for some offset calculations.
__device__ __constant__ uint8_t passwordLengthPlainDoubleMD5;
// Number of hashes present in memory.
__device__ __constant__ uint64_t numberOfHashesPlainDoubleMD5;
// Address of the hashlist in global memory.
__device__ __constant__ uint8_t *deviceGlobalHashlistAddressPlainDoubleMD5;
// Addresses of the various global bitmaps.
__device__ __constant__ uint8_t *deviceGlobalBitmapAPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalBitmapBPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalBitmapCPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalBitmapDPlainDoubleMD5;
// Addresses of the arrays for found passwords & success flags
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordsPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordFlagsPlainDoubleMD5;
__device__ __constant__ uint8_t *deviceGlobalStartPointsPlainDoubleMD5;
__device__ __constant__ uint32_t *deviceGlobalStartPasswords32PlainDoubleMD5;
__device__ __constant__ uint32_t deviceNumberStepsToRunPlainDoubleMD5;
__device__ __constant__ uint64_t deviceNumberThreadsPlainDoubleMD5;
// Defined if we are using the loadPasswords32/storePasswords32
#define USE_NEW_PASSWORD_LOADING 1
__constant__ char hexLookupValuesDoubleMD5[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
#define MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(pass_len) \
__global__ void MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_##pass_len () { \
uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d; \
uint32_t b0pass, b1pass, b2pass, b3pass; \
uint32_t password_count = 0, passOffset; \
__shared__ uint8_t __align__(16) sharedCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * pass_len]; \
__shared__ uint8_t __align__(16) sharedReverseCharsetPlainDoubleMD5[MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * pass_len]; \
__shared__ uint8_t sharedCharsetLengthsPlainDoubleMD5[pass_len]; \
__shared__ uint8_t __align__(16) sharedBitmap[8192]; \
__shared__ uint8_t hashLookup[256][2]; \
if (threadIdx.x == 0) { \
uint64_t *sharedCharset64 = (uint64_t *)sharedCharsetPlainDoubleMD5; \
uint64_t *deviceCharset64 = (uint64_t *)deviceCharsetPlainDoubleMD5; \
uint64_t *sharedReverseCharset64 = (uint64_t *)sharedReverseCharsetPlainDoubleMD5; \
uint64_t *deviceReverseCharset64 = (uint64_t *)deviceReverseCharsetPlainDoubleMD5; \
uint64_t *constantBitmap64 = (uint64_t *)constantBitmapAPlainDoubleMD5; \
uint64_t *sharedBitmap64 = (uint64_t *)sharedBitmap; \
for (a = 0; a < ((MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH * pass_len) / 8); a++) { \
sharedCharset64[a] = deviceCharset64[a]; \
sharedReverseCharset64[a] = deviceReverseCharset64[a]; \
} \
for (a = 0; a < pass_len; a++) { \
sharedCharsetLengthsPlainDoubleMD5[a] = charsetLengthsPlainDoubleMD5[a]; \
} \
for (a = 0; a < 8192 / 8; a++) { \
sharedBitmap64[a] = constantBitmap64[a]; \
} \
for (a = 0; a < 256; a++) { \
hashLookup[a][0] = hexLookupValuesDoubleMD5[a / 16]; \
hashLookup[a][1] = hexLookupValuesDoubleMD5[a % 16]; \
} \
} \
syncthreads(); \
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
b14 = pass_len * 8; \
if (USE_NEW_PASSWORD_LOADING) { \
loadPasswords32(deviceGlobalStartPasswords32PlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len); \
} else {\
if (charsetLengthsPlainDoubleMD5[1] == 0) { \
loadPasswordSingle(sharedCharsetPlainDoubleMD5, deviceGlobalStartPointsPlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len); \
} else { \
loadPasswordMultiple(sharedCharsetPlainDoubleMD5, deviceGlobalStartPointsPlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len, MFN_HASH_TYPE_PLAIN_CUDA_DOUBLE_MD5_MAX_CHARSET_LENGTH); \
} \
ResetCharacterAtPosition(0x80, pass_len, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
} \
while (password_count < deviceNumberStepsToRunPlainDoubleMD5) { \
MD5_FULL_HASH(); \
b0pass = b0; b1pass = b1; b2pass = b2; b3pass = b3; \
LoadHash16AsLEString(hashLookup); \
b8 = 0x00000080; \
b14 = 32 * 8; \
MD5_FIRST_3_ROUNDS(); \
MD5II (a, b, c, d, b0, MD5S41, 0xf4292244); \
MD5II (d, a, b, c, b7, MD5S42, 0x432aff97); \
MD5II (c, d, a, b, b14, MD5S43, 0xab9423a7); \
MD5II (b, c, d, a, b5, MD5S44, 0xfc93a039); \
MD5II (a, b, c, d, b12, MD5S41, 0x655b59c3); \
MD5II (d, a, b, c, b3, MD5S42, 0x8f0ccc92); \
MD5II (c, d, a, b, b10, MD5S43, 0xffeff47d); \
MD5II (b, c, d, a, b1, MD5S44, 0x85845dd1); \
MD5II (a, b, c, d, b8, MD5S41, 0x6fa87e4f); \
MD5II (d, a, b, c, b15, MD5S42, 0xfe2ce6e0); \
MD5II (c, d, a, b, b6, MD5S43, 0xa3014314); \
MD5II (b, c, d, a, b13, MD5S44, 0x4e0811a1); \
MD5II (a, b, c, d, b4, MD5S41, 0xf7537e82); \
if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
if (!(deviceGlobalBitmapAPlainDoubleMD5) || ((deviceGlobalBitmapAPlainDoubleMD5[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
MD5II (d, a, b, c, b11, MD5S42, 0xbd3af235); \
if (!deviceGlobalBitmapDPlainDoubleMD5 || ((deviceGlobalBitmapDPlainDoubleMD5[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
MD5II (c, d, a, b, b2, MD5S43, 0x2ad7d2bb); \
if (!deviceGlobalBitmapCPlainDoubleMD5 || ((deviceGlobalBitmapCPlainDoubleMD5[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
MD5II (b, c, d, a, b9, MD5S44, 0xeb86d391); \
if (!deviceGlobalBitmapBPlainDoubleMD5 || ((deviceGlobalBitmapBPlainDoubleMD5[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
b0 = b0pass; b1 = b1pass; b2 = b2pass; b3 = b3pass; \
checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
deviceGlobalFoundPasswordsPlainDoubleMD5, deviceGlobalFoundPasswordFlagsPlainDoubleMD5, \
deviceGlobalHashlistAddressPlainDoubleMD5, numberOfHashesPlainDoubleMD5, \
passwordLengthPlainDoubleMD5); \
} } } } }\
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
b14 = pass_len * 8; \
b0 = b0pass; b1 = b1pass; b2 = b2pass; b3 = b3pass; \
if (charsetLengthsPlainDoubleMD5[1] == 0) { \
makeMFNSingleIncrementors##pass_len (sharedCharsetPlainDoubleMD5, sharedReverseCharsetPlainDoubleMD5, sharedCharsetLengthsPlainDoubleMD5); \
} else { \
makeMFNMultipleIncrementors##pass_len (sharedCharsetPlainDoubleMD5, sharedReverseCharsetPlainDoubleMD5, sharedCharsetLengthsPlainDoubleMD5); \
} \
password_count++; \
} \
if (USE_NEW_PASSWORD_LOADING) { \
storePasswords32(deviceGlobalStartPasswords32PlainDoubleMD5, deviceNumberThreadsPlainDoubleMD5, pass_len); \
} \
}
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(1);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(2);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(3);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(4);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(5);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(6);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(7);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(8);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(9);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(10);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(11);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(12);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(13);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(14);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(15);
MAKE_MFN_DOUBLE_MD5_KERNEL1_8LENGTH(16);
extern "C" cudaError_t MFNHashTypePlainCUDA_DoubleMD5_CopyValueToConstant(
const char *symbolName, void *hostDataAddress, size_t bytesToCopy) {
return cudaMemcpyToSymbol(symbolName, hostDataAddress, bytesToCopy);
}
extern "C" cudaError_t MFNHashTypePlainCUDA_DoubleMD5_LaunchKernel(uint32_t passwordLength, uint32_t Blocks, uint32_t Threads) {
//printf("MFNHashTypePlainCUDA_MD5_LaunchKernel()\n");
//cudaPrintfInit();
switch (passwordLength) {
case 1:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_1 <<< Blocks, Threads >>> ();
break;
case 2:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_2 <<< Blocks, Threads >>> ();
break;
case 3:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_3 <<< Blocks, Threads >>> ();
break;
case 4:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_4 <<< Blocks, Threads >>> ();
break;
case 5:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_5 <<< Blocks, Threads >>> ();
break;
case 6:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_6 <<< Blocks, Threads >>> ();
break;
case 7:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_7 <<< Blocks, Threads >>> ();
break;
case 8:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_8 <<< Blocks, Threads >>> ();
break;
case 9:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_9 <<< Blocks, Threads >>> ();
break;
case 10:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_10 <<< Blocks, Threads >>> ();
break;
case 11:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_11 <<< Blocks, Threads >>> ();
break;
case 12:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_12 <<< Blocks, Threads >>> ();
break;
case 13:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_13 <<< Blocks, Threads >>> ();
break;
case 14:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_14 <<< Blocks, Threads >>> ();
break;
case 15:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_15 <<< Blocks, Threads >>> ();
break;
case 16:
MFNHashTypePlainCUDA_DoubleMD5_GeneratedKernel_16 <<< Blocks, Threads >>> ();
break;
default:
printf("Password length %d unsupported!\n", passwordLength);
exit(1);
break;
}
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
return cudaGetLastError();
}
|
ae5b0763f17e036f1cdbc847677a6455ef65342e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// GPU Libraries
// Macro to handle errors that occurred in the CUDA API
__device__ void recursiveReduce(int *g_inData, int *g_outData, int inSize, int outSize)
{
extern __shared__ int sData[];
// Identification
unsigned int tId = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize
sData[tId] = 0;
__syncthreads();
// Fill up the shared memory
if (tId < blockDim.x) {
sData[tId] = g_inData[i];
}
__syncthreads();
// Tree based reduction
for (unsigned int d = 1; d < blockDim.x; d *= 2) {
if (tId % (2 * d) == 0)
if (tId + d < blockDim.x)
sData[tId] += sData[tId + d];
__syncthreads();
}
// Write the result for this block to global memory
if (tId == 0) {
g_outData[blockIdx.x] = sData[0];
}
__syncthreads();
// Recursive call
if (outSize > 1 && i == 0) {
// Recurse on the block-level partial sums (device-side call)
recursiveReduce(g_outData, g_outData, outSize, (outSize - 1) / blockDim.x + 1);
}
else return;
}
__global__ void reduceKernel(int *g_inData, int *g_outData, int inSize, int outSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
recursiveReduce(g_inData, g_outData, inSize, outSize);
}
}
|
ae5b0763f17e036f1cdbc847677a6455ef65342e.cu
|
#include "includes.h"
// GPU Libraries
// Macro to handle errors that occurred in the CUDA API
__device__ void recursiveReduce(int *g_inData, int *g_outData, int inSize, int outSize)
{
extern __shared__ int sData[];
// Identification
unsigned int tId = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize
sData[tId] = 0;
__syncthreads();
// Fill up the shared memory
if (tId < blockDim.x) {
sData[tId] = g_inData[i];
}
__syncthreads();
// Tree based reduction
for (unsigned int d = 1; d < blockDim.x; d *= 2) {
if (tId % (2 * d) == 0)
if (tId + d < blockDim.x)
sData[tId] += sData[tId + d];
__syncthreads();
}
// Write the result for this block to global memory
if (tId == 0) {
g_outData[blockIdx.x] = sData[0];
}
__syncthreads();
// Recursive call
if (outSize > 1 && i == 0) {
// Recurse on the block-level partial sums (device-side call)
recursiveReduce(g_outData, g_outData, outSize, (outSize - 1) / blockDim.x + 1);
}
else return;
}
__global__ void reduceKernel(int *g_inData, int *g_outData, int inSize, int outSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
recursiveReduce(g_inData, g_outData, inSize, outSize);
}
}
|
529a51e8cffe27a09836c02d4d7a0a62ec1b8969.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <inttypes.h>
#include <float.h>
#ifndef tile_size_x
#define tile_size_x 1
#endif
#ifndef block_size_x
#define block_size_x 512
#endif
#ifndef window_width
#define window_width 1500
#endif
#define USE_READ_ONLY_CACHE read_only
#if USE_READ_ONLY_CACHE == 1
#define LDG(x, y) __ldg(x+y)
#elif USE_READ_ONLY_CACHE == 0
#define LDG(x, y) x[y]
#endif
#ifndef write_sums
#define write_sums 0
#endif
#ifndef write_spm
#define write_spm 0
#endif
#ifndef write_rows
#define write_rows 0
#endif
#ifndef use_shared
#define use_shared 0
#endif
extern "C" {
__global__ void quadratic_difference_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct);
__global__ void match3b_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct);
}
template<typename F>
__device__ void correlate_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct, F criterion);
__forceinline__ __device__ bool match3b(float x1, float y1, float z1, float t1, float x2, float y2, float z2, float t2);
__forceinline__ __device__ bool quadratic_difference(float x1, float y1, float z1, float ct1, float x2, float y2, float z2, float ct2);
/*
* This is the kernel used for computing correlations in both directions using the quadratic difference criterion
*/
__global__ void quadratic_difference_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct) {
correlate_full(row_idx, col_idx, prefix_sums, sums, N, x, y, z, ct, quadratic_difference);
}
/*
* This is the kernel used for computing correlations in both directions using the match 3b criterion
*/
__global__ void match3b_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct) {
correlate_full(row_idx, col_idx, prefix_sums, sums, N, x, y, z, ct, match3b);
}
/*
 * This function fills shared memory with values from global memory
*
* The area loaded is equal to the working set of this thread block (block_size_x * tile_size_x) plus the window_width
*
* The threads in a thread block will load values in global memory from their global index 'i' up to block_size_x*tile_size_x+window_width
* It is possible to modify which values from global memory are loaded by using the parameter 'offset'
* The threads can skip the first x elements of shared memory by using a non zero value for 'start'
* N is the total number of hits in the input, used to guard out-of-bound accesses
*
* first loading phase, start=0, offset=bx-window_width
* second loading phase, start=block_size_x*tile_size_x, offset=bx
*/
__forceinline__ __device__ void fill_shared_memory(float *sh_ct, float *sh_x, float *sh_y, float* sh_z,
const float *ct, const float *x, const float *y, const float *z,
int bx, int i, int start, int offset, int N) {
#pragma unroll
for (int k=start+i; k < block_size_x*tile_size_x+window_width; k+=block_size_x) {
if (k+offset >= 0 && k+offset < N) {
sh_ct[k] = LDG(ct,k+offset);
sh_x[k] = LDG(x,k+offset);
sh_y[k] = LDG(y,k+offset);
sh_z[k] = LDG(z,k+offset);
} else {
sh_ct[k] = (float) NAN; //this value ensures out-of-bound hits won't be correlated
sh_x[k] = 0.0f;
sh_y[k] = 0.0f;
sh_z[k] = 0.0f;
}
}
}
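/*
 * Loading-phase sketch with the default tunables declared above
 * (block_size_x = 512, tile_size_x = 1, window_width = 1500), where bx is the
 * index of the first hit handled by this block: phase one (start = 0,
 * offset = bx - window_width) fills sh_*[0 .. 2011] from global hits
 * [bx - 1500, bx + 512), i.e. the window before the working set plus the working
 * set itself; phase two re-seeds sh_*[0 .. 511] from registers and
 * (start = 512, offset = bx) loads sh_*[512 .. 2011] from [bx + 512, bx + 2012),
 * so shared memory then covers the working set plus the window after it.
 * Out-of-range hits get ct = NaN so the criterion can never match them.
 */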
/*
* This function is responsible for looping over the iteration space of each thread
 * For each candidate pair it evaluates the criterion and, depending on the compile-time
 * flags, counts the correlation and/or stores the column index of the correlated hit.
*/
template<typename F>
__forceinline__ __device__ void correlate(int *row_idx, int *col_idx, int *sum, int *offset, int bx, int i,
float *l_x, float *l_y, float *l_z, float *l_ct, float *sh_x, float *sh_y, float *sh_z, float *sh_ct, int col_offset, int it_offset, F criterion) {
for (int j=it_offset; j < window_width+it_offset; j++) {
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
bool condition = criterion(l_x[ti], l_y[ti], l_z[ti], l_ct[ti],
sh_x[i+ti*block_size_x+j], sh_y[i+ti*block_size_x+j],
sh_z[i+ti*block_size_x+j], sh_ct[i+ti*block_size_x+j]);
if (condition) {
#if write_spm == 1
#if write_rows
row_idx[offset[ti]] = bx+i+ti*block_size_x;
#endif
col_idx[offset[ti]] = bx+i+ti*block_size_x+j+col_offset;
offset[ti] += 1;
#endif
#if write_sums == 1
sum[ti] += 1;
#endif
}
}
}
}
/*
 * This function computes the correlations between hits that are no more than 'window_width' apart in both directions.
* It does this using a 1-dimensional mapping of threads and thread blocks to hits in this time slice.
*
* This function supports the usual set of optimizations, including tiling, read-only cache.
* Tuning parameters supported are 'read_only' [0,1], 'tile_size_x' any low number, and 'block_size_x' multiple of 32.
*
* 'write_sums' can be set to [0,1] to enable the code that outputs the number of correlated hits per hit
* This number is used to compute the offsets into the sparse matrix representation of the correlations table.
*
* 'write_spm' can be set to [0,1] to enable the code that outputs the sparse matrix
* 'write_rows' can be set to [0,1] to enable also writing the row_idx, only effective when write_spm=1
*
*/
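/*
 * Typical usage sketch (assumed from the tuning parameters above, not a prescribed interface):
 * the kernel is launched twice. A first pass with write_sums=1 and write_spm=0 counts the
 * correlated hits per hit into 'sums'; a prefix sum over 'sums' then yields 'prefix_sums'
 * (the kernels read prefix_sums[i-1] as the write offset for hit i); a second pass with
 * write_spm=1 (and optionally write_rows=1) writes the column/row indices of the sparse
 * correlation matrix at those offsets.
 */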
template<typename F>
__device__ void correlate_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct, F criterion) {
int i = threadIdx.x;
int bx = blockIdx.x * block_size_x * tile_size_x;
__shared__ float sh_ct[block_size_x * tile_size_x + window_width];
__shared__ float sh_x[block_size_x * tile_size_x + window_width];
__shared__ float sh_y[block_size_x * tile_size_x + window_width];
__shared__ float sh_z[block_size_x * tile_size_x + window_width];
//the first loading phase
fill_shared_memory(sh_ct, sh_x, sh_y, sh_z, ct, x, y, z, bx, i, 0, bx-window_width, N);
#if write_spm == 1
int offset[tile_size_x];
if (bx+i==0) {
offset[0] = 0;
}
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
if (bx+i+ti*block_size_x-1 >= 0 && bx+i+ti*block_size_x-1 < N) {
offset[ti] = prefix_sums[bx+i+ti*block_size_x-1];
}
}
#else
int *offset = (int *)0;
#endif
__syncthreads();
//start of the computation phase
float l_ct[tile_size_x];
float l_x[tile_size_x];
float l_y[tile_size_x];
float l_z[tile_size_x];
#if write_sums == 1
int sum[tile_size_x];
#else
int *sum = (int *)0;
#endif
//keep the most often used values in registers
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
l_ct[ti] = sh_ct[i+ti*block_size_x+window_width];
l_x[ti] = sh_x[i+ti*block_size_x+window_width];
l_y[ti] = sh_y[i+ti*block_size_x+window_width];
l_z[ti] = sh_z[i+ti*block_size_x+window_width];
#if write_sums == 1
sum[ti] = 0;
#endif
}
//first loop computes correlations with earlier hits
correlate(row_idx, col_idx, sum, offset, bx, i, l_x, l_y, l_z, l_ct,
sh_x, sh_y, sh_z, sh_ct, -window_width, 0, criterion);
//make sure all threads are done with phase-1
__syncthreads();
//start load phase-2
//fill the first part of shared memory with data already in registers
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
sh_ct[i+ti*block_size_x] = l_ct[ti];
sh_x[i+ti*block_size_x] = l_x[ti];
sh_y[i+ti*block_size_x] = l_y[ti];
sh_z[i+ti*block_size_x] = l_z[ti];
}
//the first block_size_x*tile_size_x part has already been filled
fill_shared_memory(sh_ct, sh_x, sh_y, sh_z, ct, x, y, z, bx, i, block_size_x*tile_size_x, bx, N);
__syncthreads();
//the next loop computes correlations with hits later in time
correlate(row_idx, col_idx, sum, offset, bx, i, l_x, l_y, l_z, l_ct,
sh_x, sh_y, sh_z, sh_ct, 0, 1, criterion);
#if write_sums == 1
for (int ti=0; ti<tile_size_x; ti++) {
if (bx+i+ti*block_size_x < N) {
sums[bx+i+ti*block_size_x] = sum[ti];
}
}
#endif
}
//constants needed by Match 3B criterion
//the reason that we hard code the constants like this is that we can't use sqrt in device constants sadly
#define roadwidth 90.0f
#define speed_of_light 0.299792458f // m/ns
#define inverse_c (1.0f/speed_of_light)
#define index_of_refrac 1.3800851282f // average index of refraction of water
#define D0 (roadwidth)
#define D1 (roadwidth * 2.0f)
#define D02 (D0 * D0)
#define D12 (D1 * D1)
#define R2 (roadwidth * roadwidth)
#define Rs2 3847.2165714f
#define Rst 58.9942930573f
#define D22 42228.1334918f
#define Rt 85.6010699976f
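/*
 * Sketch of how these hard-coded numbers relate to the parameters above (an assumed reading,
 * inferred from the values): with the Cherenkov angle theta_c defined by cos(theta_c) = 1/index_of_refrac,
 *   Rs2 ~ (roadwidth*sin(theta_c))^2, Rst ~ roadwidth*sin(theta_c)*tan(theta_c), Rt ~ roadwidth*tan(theta_c),
 * and D22 is the squared distance at which the two dmin branches of match3b coincide,
 * i.e. sqrt(D22 - R2) - Rt == sqrt(D22 - D12).
 */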
/*
* This function implements the Match 3B criterion
*/
__forceinline__ __device__ bool match3b(float x1, float y1, float z1, float t1, float x2, float y2, float z2, float t2) {
float difft = fabsf(t1 - t2);
if (isnan(difft)) {
return false;
}
float d2 = ((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)) + ((z1-z2)*(z1-z2));
float dmax = 0.0f;
if (d2 < D02) {
dmax = sqrtf(d2) * index_of_refrac;
} else {
dmax = sqrtf(d2 - Rs2) + Rst;
}
if (difft > (dmax * inverse_c)) {
return false;
}
float dmin = 0.0f;
if (d2 > D22) {
dmin = sqrtf(d2 - R2) - Rt;
} else if (d2 > D12) {
dmin = sqrtf(d2 - D12);
} else {
return true;
}
return (difft >= (dmin*inverse_c));
}
/*
 * This function implements the quadratic difference criterion: two hits are correlated when
 * (ct1-ct2)^2 < (x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2, i.e. their time difference (expressed as the
 * distance ct) is smaller than their spatial separation.
*/
__forceinline__ __device__ bool quadratic_difference(float x1, float y1, float z1, float ct1, float x2, float y2, float z2, float ct2) {
float diffct = ct1 - ct2;
float diffx = x1 - x2;
float diffy = y1 - y2;
float diffz = z1 - z2;
return (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz);
}
/*
* This is a small helper kernel to ready the input for the Match 3B kernel, currently not in use
*/
__global__ void convert_ct_to_t(float *ct, int n) {
int i = threadIdx.x + blockIdx.x * block_size_x;
if (i<n) {
ct[i] = ct[i] * inverse_c;
}
}
#ifndef shared_memory_size
#define shared_memory_size 10*block_size_x
#endif
/*
* This kernel is an experimental version of the above quadratic_difference_full kernel.
* It is not production ready and needs more work.
*
* This kernel uses warp-shuffle instructions to re-use many of
* the input values in registers and reduce the pressure on shared memory.
* However, it does this so drastically that shared memory is hardly needed anymore.
*
* Tuning parameters supported are 'block_size_x', 'read_only' [0,1], 'use_if' [0,1]
*
*/
__global__ void quadratic_difference_full_shfl(int *__restrict__ row_idx, int *__restrict__ col_idx, int *__restrict__ prefix_sums, int *sums, int N, int sliding_window_width,
const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z, const float *__restrict__ ct) {
int tx = threadIdx.x;
int bx = blockIdx.x * block_size_x;
#if write_sums == 1
int sum = 0;
#endif
#if write_spm == 1
int offset = 0;
#if use_shared == 1
int block_start = 0;
__shared__ int sh_col_idx[shared_memory_size];
if (blockIdx.x > 0) {
block_start = prefix_sums[bx-1];
}
#elif write_rows == 1
int block_start = 0;
#endif
#endif
float ct_i = 0.0f;
float x_i = 0.0f;
float y_i = 0.0f;
float z_i = 0.0f;
int output = 0;
int i = bx + tx - window_width;
if (bx+tx < N) {
output = 1;
ct_i = LDG(ct,bx+tx);
x_i = LDG(x,bx+tx);
y_i = LDG(y,bx+tx);
z_i = LDG(z,bx+tx);
}
#if write_spm == 1
if (bx+tx > 0 && bx+tx < N) {
offset = prefix_sums[bx+tx-1];
}
#if use_shared == 1
offset -= block_start;
#endif
#endif
int laneid = tx & (32-1);
if (output) {
for (int j=0; j < 32-laneid && output; j++) {
if (i+j >= 0 && i+j<N) {
float diffct = ct_i - LDG(ct,i+j);
float diffx = x_i - LDG(x,i+j);
float diffy = y_i - LDG(y,i+j);
float diffz = z_i - LDG(z,i+j);
if (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz) {
#if write_sums == 1
sum++;
#endif
#if write_spm == 1
#if write_rows
row_idx[offset + block_start] = bx+tx;
#endif
#if use_shared == 1
sh_col_idx[offset++] = i+j;
#else
col_idx[offset++] = i+j;
#endif
#endif
}
}
}
}//end of if output
int j;
#if f_unroll == 2
#pragma unroll 2
#elif f_unroll == 4
#pragma unroll 4
#endif
for (j=32; j < window_width*2-32; j+=32) {
float ct_j = 0.0f;
float x_j = 0.0f;
float y_j = 0.0f;
float z_j = 0.0f;
if (i+j >= 0 && i+j<N) {
ct_j = LDG(ct,i+j);
x_j = LDG(x,i+j);
y_j = LDG(y,i+j);
z_j = LDG(z,i+j);
}
for (int d=1; d<33; d++) {
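// Rotate the warp's 32 loaded values by one lane per iteration: __shfl with source lane
// laneid+1 (wrapping modulo the warp size) lets each thread see all ct/x/y/z values loaded
// by its warp without re-reading global memory; the correction 'c' below accounts for the wrap.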
ct_j = __shfl(ct_j, laneid+1);
x_j = __shfl(x_j, laneid+1);
y_j = __shfl(y_j, laneid+1);
z_j = __shfl(z_j, laneid+1);
float diffct = ct_i - ct_j;
float diffx = x_i - x_j;
float diffy = y_i - y_j;
float diffz = z_i - z_j;
if (i+j >= 0 && i+j<N && output && (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz)) {
#if write_sums == 1
sum++;
#endif
#if write_spm == 1
#if write_rows
row_idx[offset + block_start] = bx+tx;
#endif
int c = laneid+d > 31 ? -32 : 0;
#if use_shared == 1
sh_col_idx[offset++] = i+j+d+c;
#else
col_idx[offset++] = i+j+d+c;
#endif
#endif
}
}
}
if (output) {
j-=laneid;
for (; j < window_width*2+1; j++) {
if (i+j >= 0 && i+j<N) {
float diffct = ct_i - LDG(ct,i+j);
float diffx = x_i - LDG(x,i+j);
float diffy = y_i - LDG(y,i+j);
float diffz = z_i - LDG(z,i+j);
if (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz) {
#if write_sums == 1
sum++;
#endif
#if write_spm == 1
#if write_rows
row_idx[offset + block_start] = bx+tx;
#endif
#if use_shared == 1
sh_col_idx[offset++] = i+j;
#else
col_idx[offset++] = i+j;
#endif
#endif
}
}
}
} // end of if output
#if write_sums == 1
if (bx+tx < N) {
sums[bx+tx] = sum;
}
#endif
//collaboratively write back the output collected in shared memory to global memory
#if use_shared == 1 && write_spm == 1
int block_stop = 0;
int last_i = bx + block_size_x-1;
if (last_i < N) {
block_stop = prefix_sums[last_i];
} else {
block_stop = prefix_sums[N-1];
}
__syncthreads(); //ensure all threads are done writing shared memory
for (int k=block_start+tx; k<block_stop; k+=block_size_x) {
if (k-block_start >= 0 && k-block_start < shared_memory_size-1)
col_idx[k] = sh_col_idx[k-block_start];
}
#endif
}
|
529a51e8cffe27a09836c02d4d7a0a62ec1b8969.cu
|
#include <stdio.h>
#include <inttypes.h>
#include <float.h>
#ifndef tile_size_x
#define tile_size_x 1
#endif
#ifndef block_size_x
#define block_size_x 512
#endif
#ifndef window_width
#define window_width 1500
#endif
#define USE_READ_ONLY_CACHE read_only
#if USE_READ_ONLY_CACHE == 1
#define LDG(x, y) __ldg(x+y)
#elif USE_READ_ONLY_CACHE == 0
#define LDG(x, y) x[y]
#endif
#ifndef write_sums
#define write_sums 0
#endif
#ifndef write_spm
#define write_spm 0
#endif
#ifndef write_rows
#define write_rows 0
#endif
#ifndef use_shared
#define use_shared 0
#endif
extern "C" {
__global__ void quadratic_difference_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct);
__global__ void match3b_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct);
}
template<typename F>
__device__ void correlate_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct, F criterion);
__forceinline__ __device__ bool match3b(float x1, float y1, float z1, float t1, float x2, float y2, float z2, float t2);
__forceinline__ __device__ bool quadratic_difference(float x1, float y1, float z1, float ct1, float x2, float y2, float z2, float ct2);
/*
* This is the kernel used for computing correlations in both directions using the quadratic difference criterion
*/
__global__ void quadratic_difference_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct) {
correlate_full(row_idx, col_idx, prefix_sums, sums, N, x, y, z, ct, quadratic_difference);
}
/*
* This is the kernel used for computing correlations in both directions using the match 3b criterion
*/
__global__ void match3b_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, int sliding_window_width, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct) {
correlate_full(row_idx, col_idx, prefix_sums, sums, N, x, y, z, ct, match3b);
}
/*
 * This function fills shared memory with values from global memory
*
* The area loaded is equal to the working set of this thread block (block_size_x * tile_size_x) plus the window_width
*
* The threads in a thread block will load values in global memory from their global index 'i' up to block_size_x*tile_size_x+window_width
* It is possible to modify which values from global memory are loaded by using the parameter 'offset'
* The threads can skip the first x elements of shared memory by using a non zero value for 'start'
* N is the total number of hits in the input, used to guard out-of-bound accesses
*
* first loading phase, start=0, offset=bx-window_width
* second loading phase, start=block_size_x*tile_size_x, offset=bx
*/
__forceinline__ __device__ void fill_shared_memory(float *sh_ct, float *sh_x, float *sh_y, float* sh_z,
const float *ct, const float *x, const float *y, const float *z,
int bx, int i, int start, int offset, int N) {
#pragma unroll
for (int k=start+i; k < block_size_x*tile_size_x+window_width; k+=block_size_x) {
if (k+offset >= 0 && k+offset < N) {
sh_ct[k] = LDG(ct,k+offset);
sh_x[k] = LDG(x,k+offset);
sh_y[k] = LDG(y,k+offset);
sh_z[k] = LDG(z,k+offset);
} else {
sh_ct[k] = (float) NAN; //this value ensures out-of-bound hits won't be correlated
sh_x[k] = 0.0f;
sh_y[k] = 0.0f;
sh_z[k] = 0.0f;
}
}
}
/*
* This function is responsible for looping over the iteration space of each thread
* For each correlation to be computed it will call the criterion and either
* store the number of correlations or the coordinates of the correlated hit.
*/
template<typename F>
__forceinline__ __device__ void correlate(int *row_idx, int *col_idx, int *sum, int *offset, int bx, int i,
float *l_x, float *l_y, float *l_z, float *l_ct, float *sh_x, float *sh_y, float *sh_z, float *sh_ct, int col_offset, int it_offset, F criterion) {
for (int j=it_offset; j < window_width+it_offset; j++) {
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
bool condition = criterion(l_x[ti], l_y[ti], l_z[ti], l_ct[ti],
sh_x[i+ti*block_size_x+j], sh_y[i+ti*block_size_x+j],
sh_z[i+ti*block_size_x+j], sh_ct[i+ti*block_size_x+j]);
if (condition) {
#if write_spm == 1
#if write_rows
row_idx[offset[ti]] = bx+i+ti*block_size_x;
#endif
col_idx[offset[ti]] = bx+i+ti*block_size_x+j+col_offset;
offset[ti] += 1;
#endif
#if write_sums == 1
sum[ti] += 1;
#endif
}
}
}
}
/*
 * This function computes the correlations between hits that are no more than 'window_width' apart in both directions.
* It does this using a 1-dimensional mapping of threads and thread blocks to hits in this time slice.
*
* This function supports the usual set of optimizations, including tiling, read-only cache.
* Tuning parameters supported are 'read_only' [0,1], 'tile_size_x' any low number, and 'block_size_x' multiple of 32.
*
* 'write_sums' can be set to [0,1] to enable the code that outputs the number of correlated hits per hit
* This number is used to compute the offsets into the sparse matrix representation of the correlations table.
*
* 'write_spm' can be set to [0,1] to enable the code that outputs the sparse matrix
* 'write_rows' can be set to [0,1] to enable also writing the row_idx, only effective when write_spm=1
*
*/
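/*
 * Typical usage sketch (assumed from the tuning parameters above, not a prescribed interface):
 * the kernel is launched twice. A first pass with write_sums=1 and write_spm=0 counts the
 * correlated hits per hit into 'sums'; a prefix sum over 'sums' then yields 'prefix_sums'
 * (the kernels read prefix_sums[i-1] as the write offset for hit i); a second pass with
 * write_spm=1 (and optionally write_rows=1) writes the column/row indices of the sparse
 * correlation matrix at those offsets.
 */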
template<typename F>
__device__ void correlate_full(int *__restrict__ row_idx, int *__restrict__ col_idx, const int *__restrict__ prefix_sums, int *__restrict__ sums,
int N, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z,
const float *__restrict__ ct, F criterion) {
int i = threadIdx.x;
int bx = blockIdx.x * block_size_x * tile_size_x;
__shared__ float sh_ct[block_size_x * tile_size_x + window_width];
__shared__ float sh_x[block_size_x * tile_size_x + window_width];
__shared__ float sh_y[block_size_x * tile_size_x + window_width];
__shared__ float sh_z[block_size_x * tile_size_x + window_width];
//the first loading phase
fill_shared_memory(sh_ct, sh_x, sh_y, sh_z, ct, x, y, z, bx, i, 0, bx-window_width, N);
#if write_spm == 1
int offset[tile_size_x];
if (bx+i==0) {
offset[0] = 0;
}
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
if (bx+i+ti*block_size_x-1 >= 0 && bx+i+ti*block_size_x-1 < N) {
offset[ti] = prefix_sums[bx+i+ti*block_size_x-1];
}
}
#else
int *offset = (int *)0;
#endif
__syncthreads();
//start of the computation phase
float l_ct[tile_size_x];
float l_x[tile_size_x];
float l_y[tile_size_x];
float l_z[tile_size_x];
#if write_sums == 1
int sum[tile_size_x];
#else
int *sum = (int *)0;
#endif
//keep the most often used values in registers
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
l_ct[ti] = sh_ct[i+ti*block_size_x+window_width];
l_x[ti] = sh_x[i+ti*block_size_x+window_width];
l_y[ti] = sh_y[i+ti*block_size_x+window_width];
l_z[ti] = sh_z[i+ti*block_size_x+window_width];
#if write_sums == 1
sum[ti] = 0;
#endif
}
//first loop computes correlations with earlier hits
correlate(row_idx, col_idx, sum, offset, bx, i, l_x, l_y, l_z, l_ct,
sh_x, sh_y, sh_z, sh_ct, -window_width, 0, criterion);
//make sure all threads are done with phase-1
__syncthreads();
//start load phase-2
//fill the first part of shared memory with data already in registers
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
sh_ct[i+ti*block_size_x] = l_ct[ti];
sh_x[i+ti*block_size_x] = l_x[ti];
sh_y[i+ti*block_size_x] = l_y[ti];
sh_z[i+ti*block_size_x] = l_z[ti];
}
//the first block_size_x*tile_size_x part has already been filled
fill_shared_memory(sh_ct, sh_x, sh_y, sh_z, ct, x, y, z, bx, i, block_size_x*tile_size_x, bx, N);
__syncthreads();
//the next loop computes correlations with hits later in time
correlate(row_idx, col_idx, sum, offset, bx, i, l_x, l_y, l_z, l_ct,
sh_x, sh_y, sh_z, sh_ct, 0, 1, criterion);
#if write_sums == 1
for (int ti=0; ti<tile_size_x; ti++) {
if (bx+i+ti*block_size_x < N) {
sums[bx+i+ti*block_size_x] = sum[ti];
}
}
#endif
}
//constants needed by Match 3B criterion
//the reason that we hard code the constants like this is that we can't use sqrt in device constants sadly
#define roadwidth 90.0f
#define speed_of_light 0.299792458f // m/ns
#define inverse_c (1.0f/speed_of_light)
#define index_of_refrac 1.3800851282f // average index of refraction of water
#define D0 (roadwidth)
#define D1 (roadwidth * 2.0f)
#define D02 (D0 * D0)
#define D12 (D1 * D1)
#define R2 (roadwidth * roadwidth)
#define Rs2 3847.2165714f
#define Rst 58.9942930573f
#define D22 42228.1334918f
#define Rt 85.6010699976f
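/*
 * Sketch of how these hard-coded numbers relate to the parameters above (an assumed reading,
 * inferred from the values): with the Cherenkov angle theta_c defined by cos(theta_c) = 1/index_of_refrac,
 *   Rs2 ~ (roadwidth*sin(theta_c))^2, Rst ~ roadwidth*sin(theta_c)*tan(theta_c), Rt ~ roadwidth*tan(theta_c),
 * and D22 is the squared distance at which the two dmin branches of match3b coincide,
 * i.e. sqrt(D22 - R2) - Rt == sqrt(D22 - D12).
 */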
/*
* This function implements the Match 3B criterion
*/
__forceinline__ __device__ bool match3b(float x1, float y1, float z1, float t1, float x2, float y2, float z2, float t2) {
float difft = fabsf(t1 - t2);
if (isnan(difft)) {
return false;
}
float d2 = ((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)) + ((z1-z2)*(z1-z2));
float dmax = 0.0f;
if (d2 < D02) {
dmax = sqrtf(d2) * index_of_refrac;
} else {
dmax = sqrtf(d2 - Rs2) + Rst;
}
if (difft > (dmax * inverse_c)) {
return false;
}
float dmin = 0.0f;
if (d2 > D22) {
dmin = sqrtf(d2 - R2) - Rt;
} else if (d2 > D12) {
dmin = sqrtf(d2 - D12);
} else {
return true;
}
return (difft >= (dmin*inverse_c));
}
/*
 * This function implements the quadratic difference criterion: two hits are correlated when
 * (ct1-ct2)^2 < (x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2, i.e. their time difference (expressed as the
 * distance ct) is smaller than their spatial separation.
*/
__forceinline__ __device__ bool quadratic_difference(float x1, float y1, float z1, float ct1, float x2, float y2, float z2, float ct2) {
float diffct = ct1 - ct2;
float diffx = x1 - x2;
float diffy = y1 - y2;
float diffz = z1 - z2;
return (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz);
}
/*
* This is a small helper kernel to ready the input for the Match 3B kernel, currently not in use
*/
__global__ void convert_ct_to_t(float *ct, int n) {
int i = threadIdx.x + blockIdx.x * block_size_x;
if (i<n) {
ct[i] = ct[i] * inverse_c;
}
}
#ifndef shared_memory_size
#define shared_memory_size 10*block_size_x
#endif
/*
* This kernel is an experimental version of the above quadratic_difference_full kernel.
* It is not production ready and needs more work.
*
* This kernel uses warp-shuffle instructions to re-use many of
* the input values in registers and reduce the pressure on shared memory.
* However, it does this so drastically that shared memory is hardly needed anymore.
*
* Tuning parameters supported are 'block_size_x', 'read_only' [0,1], 'use_if' [0,1]
*
*/
__global__ void quadratic_difference_full_shfl(int *__restrict__ row_idx, int *__restrict__ col_idx, int *__restrict__ prefix_sums, int *sums, int N, int sliding_window_width,
const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z, const float *__restrict__ ct) {
int tx = threadIdx.x;
int bx = blockIdx.x * block_size_x;
#if write_sums == 1
int sum = 0;
#endif
#if write_spm == 1
int offset = 0;
#if use_shared == 1
int block_start = 0;
__shared__ int sh_col_idx[shared_memory_size];
if (blockIdx.x > 0) {
block_start = prefix_sums[bx-1];
}
#elif write_rows == 1
int block_start = 0;
#endif
#endif
float ct_i = 0.0f;
float x_i = 0.0f;
float y_i = 0.0f;
float z_i = 0.0f;
int output = 0;
int i = bx + tx - window_width;
if (bx+tx < N) {
output = 1;
ct_i = LDG(ct,bx+tx);
x_i = LDG(x,bx+tx);
y_i = LDG(y,bx+tx);
z_i = LDG(z,bx+tx);
}
#if write_spm == 1
if (bx+tx > 0 && bx+tx < N) {
offset = prefix_sums[bx+tx-1];
}
#if use_shared == 1
offset -= block_start;
#endif
#endif
int laneid = tx & (32-1);
if (output) {
for (int j=0; j < 32-laneid && output; j++) {
if (i+j >= 0 && i+j<N) {
float diffct = ct_i - LDG(ct,i+j);
float diffx = x_i - LDG(x,i+j);
float diffy = y_i - LDG(y,i+j);
float diffz = z_i - LDG(z,i+j);
if (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz) {
#if write_sums == 1
sum++;
#endif
#if write_spm == 1
#if write_rows
row_idx[offset + block_start] = bx+tx;
#endif
#if use_shared == 1
sh_col_idx[offset++] = i+j;
#else
col_idx[offset++] = i+j;
#endif
#endif
}
}
}
}//end of if output
int j;
#if f_unroll == 2
#pragma unroll 2
#elif f_unroll == 4
#pragma unroll 4
#endif
for (j=32; j < window_width*2-32; j+=32) {
float ct_j = 0.0f;
float x_j = 0.0f;
float y_j = 0.0f;
float z_j = 0.0f;
if (i+j >= 0 && i+j<N) {
ct_j = LDG(ct,i+j);
x_j = LDG(x,i+j);
y_j = LDG(y,i+j);
z_j = LDG(z,i+j);
}
for (int d=1; d<33; d++) {
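// Rotate the warp's 32 loaded values by one lane per iteration: __shfl with source lane
// laneid+1 (wrapping modulo the warp size) lets each thread see all ct/x/y/z values loaded
// by its warp without re-reading global memory; the correction 'c' below accounts for the wrap.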
ct_j = __shfl(ct_j, laneid+1);
x_j = __shfl(x_j, laneid+1);
y_j = __shfl(y_j, laneid+1);
z_j = __shfl(z_j, laneid+1);
float diffct = ct_i - ct_j;
float diffx = x_i - x_j;
float diffy = y_i - y_j;
float diffz = z_i - z_j;
if (i+j >= 0 && i+j<N && output && (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz)) {
#if write_sums == 1
sum++;
#endif
#if write_spm == 1
#if write_rows
row_idx[offset + block_start] = bx+tx;
#endif
int c = laneid+d > 31 ? -32 : 0;
#if use_shared == 1
sh_col_idx[offset++] = i+j+d+c;
#else
col_idx[offset++] = i+j+d+c;
#endif
#endif
}
}
}
if (output) {
j-=laneid;
for (; j < window_width*2+1; j++) {
if (i+j >= 0 && i+j<N) {
float diffct = ct_i - LDG(ct,i+j);
float diffx = x_i - LDG(x,i+j);
float diffy = y_i - LDG(y,i+j);
float diffz = z_i - LDG(z,i+j);
if (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz) {
#if write_sums == 1
sum++;
#endif
#if write_spm == 1
#if write_rows
row_idx[offset + block_start] = bx+tx;
#endif
#if use_shared == 1
sh_col_idx[offset++] = i+j;
#else
col_idx[offset++] = i+j;
#endif
#endif
}
}
}
} // end of if output
#if write_sums == 1
if (bx+tx < N) {
sums[bx+tx] = sum;
}
#endif
//collaboratively write back the output collected in shared memory to global memory
#if use_shared == 1 && write_spm == 1
int block_stop = 0;
int last_i = bx + block_size_x-1;
if (last_i < N) {
block_stop = prefix_sums[last_i];
} else {
block_stop = prefix_sums[N-1];
}
__syncthreads(); //ensure all threads are done writing shared memory
for (int k=block_start+tx; k<block_stop; k+=block_size_x) {
if (k-block_start >= 0 && k-block_start < shared_memory_size-1)
col_idx[k] = sh_col_idx[k-block_start];
}
#endif
}
|
7ab0b93b4f7402c57fed29c10a89c207145f4905.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType2DLayered , hipReadModeElementType> tex;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha, cosAlpha and the COR offset for each projection (three floats per projection)
__constant__ float projSinCosArrayDev[3*PROJ_PER_KERNEL];
float projSinCosArrayHost[3*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections)
{
// Old kernel call signature:
//hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block), 0, 0, geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
// We just read in most of the params from the constant memory instead of getting them from the param list.
// This is because we now have MANY params, since single kernel processes more than one projection!
/* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
* float* image,
* const int indAlpha,
* const Point3D deltaX ,
* const Point3D deltaY,
* const Point3D deltaZ,
* const Point3D xyzOrigin,
* const Point3D xyzOffset,
* const Point3D uv0Offset,
* const float sinalpha,
* const float cosalpha){
*/
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaY = projParamsArrayDev[6*projNumber+1];
Point3D deltaZ = projParamsArrayDev[6*projNumber+2];
Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3];
Point3D xyzOffset = projParamsArrayDev[6*projNumber+4];
Point3D S = projParamsArrayDev[6*projNumber+5];
float sinalpha = projSinCosArrayDev[3*projNumber]; // 3*projNumber because we store 3 float values (sin, cos and COR) per projection
float cosalpha = projSinCosArrayDev[3*projNumber+1];
float COR = projSinCosArrayDev[3*projNumber+2];
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX;
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+geo.nDetecU/2-0.5;
v=z+geo.nDetecV/2-0.5;
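// FDK distance weighting (a reading of the two lines below): 'weigth' is first set to U/DSO with
// U = DSO + realy*sin(alpha) - realx*cos(alpha), the source-to-voxel distance along the central
// ray, and is then inverted and squared so the texture sample is scaled by (DSO/U)^2.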
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
weigth=(geo.DSO+realy*sinalpha-realx*cosalpha)/geo.DSO;
weigth=1/(weigth*weigth);
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=tex2DLayered(tex, v +0.5 ,
u +0.5 ,
indAlpha)*weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the value accumulated over MULTIPLE projections from the local (register) array back to the 3D volume in main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
hipArray *d_projectiondata = 0;
const hipExtent extent = make_hipExtent(geo.nDetecV,geo.nDetecU,nalpha);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_projectiondata, &channelDesc, extent,hipArrayLayered);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
hipMalloc((void**)&dimage, num_bytes);
hipMemset(dimage,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++)
{
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
int j;
for(j=0; j<PROJ_PER_KERNEL; j++)
{
int currProjNumber=i*PROJ_PER_KERNEL+j;
if(currProjNumber>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
float sinalpha,cosalpha;
geo.alpha=-alphas[currProjNumber];
sinalpha=sin(geo.alpha);
cosalpha=cos(geo.alpha);
projSinCosArrayHost[3*j]=sinalpha; // 3*j because we store 3 float values (sin, cos and COR) per projection
projSinCosArrayHost[3*j+1]=cosalpha;
projSinCosArrayHost[3*j+2]=geo.COR[currProjNumber];
computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber];
offOrig.y=geo.offOrigY[currProjNumber];
offOrig.z=geo.offOrigZ[currProjNumber];
projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection
projParamsArrayHost[6*j+1]=deltaY;
projParamsArrayHost[6*j+2]=deltaZ;
projParamsArrayHost[6*j+3]=xyzOrigin;
projParamsArrayHost[6*j+4]=offOrig;
projParamsArrayHost[6*j+5]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
hipMemcpyToSymbol(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*3*PROJ_PER_KERNEL);
hipMemcpyToSymbol(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL);
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block), 0, 0, geo,dimage,i,nalpha);
cudaCheckErrors("Kernel fail");
if (timekernel)
{
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy result fail");
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dimage);
hipFreeArray(d_projectiondata);
cudaCheckErrors("hipFree d_imagedata fail");
hipDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
//______________________________________________________________________________
//
// Function: computeDeltasCube
//
// Description: Computes relative increments for each projection (volume rotation).
// Increments get passed to the backprojection kernel.
//______________________________________________________________________________
void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P0, Px0,Py0,Pz0, source;
// Get coords of Img(0,0,0)
P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
// Rotate image (this is equivalent to rotating the source and detector)
Point3D P, Px,Py,Pz; // We need other auxiliary variables to be able to perform the rotation, or we would overwrite values!
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD-geo.DSO);
Px.x=Px.x+(geo.DSD-geo.DSO);
Py.x=Py.x+(geo.DSD-geo.DSO);
Pz.x=Pz.x+(geo.DSD-geo.DSO);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD-geo.DSO);
Px.x=Px.x-(geo.DSD-geo.DSO);
Py.x=Py.x-(geo.DSD-geo.DSO);
Pz.x=Pz.x-(geo.DSD-geo.DSO);
//Done for P, now source
source.x=geo.DSD; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
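//______________________________________________________________________________
//
// Function: rollPitchYawT
//
// Description: Applies what appears to be the transpose (inverse) of the detector
// roll/pitch/yaw rotation, i.e. (Rz(roll)*Ry(pitch)*Rx(yaw))^T, to a point in place.
//______________________________________________________________________________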
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
|
7ab0b93b4f7402c57fed29c10a89c207145f4905.cu
|
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType2DLayered , cudaReadModeElementType> tex;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha, cosAlpha and the COR offset for each projection (three floats per projection)
__constant__ float projSinCosArrayDev[3*PROJ_PER_KERNEL];
float projSinCosArrayHost[3*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections)
{
// Old kernel call signature:
// kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
// We just read in most of the params from the constant memory instead of getting them from the param list.
// This is because we now have MANY params, since single kernel processes more than one projection!
/* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
* float* image,
* const int indAlpha,
* const Point3D deltaX ,
* const Point3D deltaY,
* const Point3D deltaZ,
* const Point3D xyzOrigin,
* const Point3D xyzOffset,
* const Point3D uv0Offset,
* const float sinalpha,
* const float cosalpha){
*/
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaY = projParamsArrayDev[6*projNumber+1];
Point3D deltaZ = projParamsArrayDev[6*projNumber+2];
Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3];
Point3D xyzOffset = projParamsArrayDev[6*projNumber+4];
Point3D S = projParamsArrayDev[6*projNumber+5];
float sinalpha = projSinCosArrayDev[3*projNumber]; // 3*projNumber because we store 3 float values (sin, cos and COR) per projection
float cosalpha = projSinCosArrayDev[3*projNumber+1];
float COR = projSinCosArrayDev[3*projNumber+2];
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX;
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+geo.nDetecU/2-0.5;
v=z+geo.nDetecV/2-0.5;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
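// FDK weighting: U = (geo.DSO + realy*sinalpha - realx*cosalpha)/geo.DSO approximates the source-to-voxel distance along the central ray, normalized by DSO; the backprojected value below is scaled by 1/U^2.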
weigth=(geo.DSO+realy*sinalpha-realx*cosalpha)/geo.DSO;
weigth=1/(weigth*weigth);
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=tex2DLayered(tex, v +0.5 ,
u +0.5 ,
indAlpha)*weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the voxel value accumulated from MULTIPLE projections back to the 3D volume
// All intermediate updates happened in the local (register) array, so each voxel needs only this single write to the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
cudaArray *d_projectiondata = 0;
const cudaExtent extent = make_cudaExtent(geo.nDetecV,geo.nDetecU,nalpha);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent,cudaArrayLayered);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
cudaMalloc((void**)&dimage, num_bytes);
cudaMemset(dimage,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++)
{
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
int j;
for(j=0; j<PROJ_PER_KERNEL; j++)
{
int currProjNumber=i*PROJ_PER_KERNEL+j;
if(currProjNumber>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
float sinalpha,cosalpha;
geo.alpha=-alphas[currProjNumber];
sinalpha=sin(geo.alpha);
cosalpha=cos(geo.alpha);
projSinCosArrayHost[3*j]=sinalpha; // 3*j because we store 3 float values (sin, cos, COR) per projection
projSinCosArrayHost[3*j+1]=cosalpha;
projSinCosArrayHost[3*j+2]=geo.COR[currProjNumber];
computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber];
offOrig.y=geo.offOrigY[currProjNumber];
offOrig.z=geo.offOrigZ[currProjNumber];
projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection
projParamsArrayHost[6*j+1]=deltaY;
projParamsArrayHost[6*j+2]=deltaZ;
projParamsArrayHost[6*j+3]=xyzOrigin;
projParamsArrayHost[6*j+4]=offOrig;
projParamsArrayHost[6*j+5]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbol(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*3*PROJ_PER_KERNEL);
cudaMemcpyToSymbol(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL);
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,nalpha);
cudaCheckErrors("Kernel fail");
if (timekernel)
{
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy result fail");
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dimage);
cudaFreeArray(d_projectiondata);
cudaCheckErrors("cudaFree d_imagedata fail");
cudaDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
//______________________________________________________________________________
//
// Function: computeDeltasCube
//
// Description: Computes relative increments for each projection (volume rotation).
// Increments get passed to the backprojection kernel.
//______________________________________________________________________________
void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P0, Px0,Py0,Pz0, source;
// Get coords of Img(0,0,0)
P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
// Rotate image (this is equivalent of rotating the source and detector)
Point3D P, Px,Py,Pz; // We need other auxiliary variables to be able to perform the rotation, or we would overwrite values!
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD-geo.DSO);
Px.x=Px.x+(geo.DSD-geo.DSO);
Py.x=Py.x+(geo.DSD-geo.DSO);
Pz.x=Pz.x+(geo.DSD-geo.DSO);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD-geo.DSO);
Px.x=Px.x-(geo.DSD-geo.DSO);
Py.x=Py.x-(geo.DSD-geo.DSO);
Pz.x=Pz.x-(geo.DSD-geo.DSO);
//Done for P, now source
source.x=geo.DSD; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
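// Rotates a point in place by the per-projection detector roll, pitch and yaw angles (geo.dRoll, geo.dPitch, geo.dYaw).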
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
|
dc76f4b36eaa50028895861d486f8d494864f298.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/MultiLabelMarginCriterion.hip"
#else
static inline void THNN_(MultiLabelMarginCriterion_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *target) {
if (input->dim() <= 1) {
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
TORCH_CHECK(!target->is_empty() && (target->dim() <= 1) && (target_size == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else if (input->dim() == 2) {
int nframe = input->size(0);
int dim = input->size(1);
TORCH_CHECK(!target->is_empty() && (target->dim() == 2)
&& (target->size(0) == nframe) && (target->size(1) == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else {
TORCH_CHECK(false, "non-empty vector or matrix expected, got size: ", input->sizes());
}
}
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, target);
if(input->dim() <= 1)
{
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THCTensor_(resize0d)(state, output);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(hipGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None)
{
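// Mean/Sum reduction: compute one loss value per sample into a temporary buffer, then sum them into the scalar output below.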
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize0d)(state, output);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(hipGetLastError());
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp)));
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
THCudaCheck(hipGetLastError());
}
}
else {
TORCH_INTERNAL_ASSERT(false, "non-empty vector or matrix expected (shouldn't get here)");
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(gradInput->dim() <= 1)
{
int dim = gradInput->dim() == 0 ? 1 : gradInput->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THArgCheck(!target->is_empty() && (target->dim() <= 1) && (target_size == dim), 3,
"inconsistent target size");
TORCH_CHECK(target->sizes() == istarget->sizes(), "inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else {
AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes());
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
|
dc76f4b36eaa50028895861d486f8d494864f298.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/MultiLabelMarginCriterion.cu"
#else
static inline void THNN_(MultiLabelMarginCriterion_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *target) {
if (input->dim() <= 1) {
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
TORCH_CHECK(!target->is_empty() && (target->dim() <= 1) && (target_size == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else if (input->dim() == 2) {
int nframe = input->size(0);
int dim = input->size(1);
TORCH_CHECK(!target->is_empty() && (target->dim() == 2)
&& (target->size(0) == nframe) && (target->size(1) == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else {
TORCH_CHECK(false, "non-empty vector or matrix expected, got size: ", input->sizes());
}
}
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, target);
if(input->dim() <= 1)
{
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THCTensor_(resize0d)(state, output);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(cudaGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None)
{
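// Mean/Sum reduction: compute one loss value per sample into a temporary buffer, then sum them into the scalar output below.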
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize0d)(state, output);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(cudaGetLastError());
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp)));
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
THCudaCheck(cudaGetLastError());
}
}
else {
TORCH_INTERNAL_ASSERT(false, "non-empty vector or matrix expected (shouldn't get here)");
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(gradInput->dim() <= 1)
{
int dim = gradInput->dim() == 0 ? 1 : gradInput->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THArgCheck(!target->is_empty() && (target->dim() <= 1) && (target_size == dim), 3,
"inconsistent target size");
TORCH_CHECK(target->sizes() == istarget->sizes(), "inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else {
AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes());
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
|
f02d06189f22a7be25eac483e7274365727dc979.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void helloCPU()
{
printf("Hello from the CPU.\n");
}
/*
* Refactor the `helloGPU` definition to be a kernel
* that can be launched on the GPU. Update its message
* to read "Hello from the GPU!"
*/
__global__ void helloGPU()
{
printf("Hello from the GPU!\n");
}
int main()
{
helloCPU();
/*
* Refactor this call to `helloGPU` so that it launches
* as a kernel on the GPU.
*/
hipLaunchKernelGGL(( helloGPU), dim3(1),dim3(1), 0, 0, );
/*
 * Add code below to synchronize on the completion of the
 * `helloGPU` kernel before continuing the CPU
 * thread.
*/
hipDeviceSynchronize();
}
|
f02d06189f22a7be25eac483e7274365727dc979.cu
|
#include <stdio.h>
void helloCPU()
{
printf("Hello from the CPU.\n");
}
/*
* Refactor the `helloGPU` definition to be a kernel
* that can be launched on the GPU. Update its message
* to read "Hello from the GPU!"
*/
__global__ void helloGPU()
{
printf("Hello from the GPU!\n");
}
int main()
{
helloCPU();
/*
* Refactor this call to `helloGPU` so that it launches
* as a kernel on the GPU.
*/
helloGPU<<<1,1>>>();
/*
 * Add code below to synchronize on the completion of the
 * `helloGPU` kernel before continuing the CPU
 * thread.
*/
cudaDeviceSynchronize();
}
|
76096e5c0c64f045224d691851d3f06210d3967c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include "device_launch_parameters.h"
// #include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 128
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width) {
for (int i = 0; i < GRID_SIZE; i++) {
for (int j = 0; j < GRID_SIZE; j++) {
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
sum += in[start + ii * width + jj] * mul[jj];
}
}
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
if (jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for (int i = 0; i < SIZE; i++) {
if (abs(ref[i] - out[i]) > 1.e-6) {
printf("Diff %f\n", abs(ref[i] - out[i]));
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
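// Note: despite the "ShflDown" in its name, this kernel performs the block reduction through shared memory rather than warp shuffle intrinsics.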
__global__ void normWithShflDown(float *in, float *out, float *mul, int width) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= width || ty >= SIZE / width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
__syncthreads();
// perform first level of reduction,
// reading from global memory, writing to shared memory
__shared__ float sdata[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
//unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
int i = threadIdx.x;
__syncthreads();
float mySum = 0;
if (i + BLOCK_SIZE < width)
for (int j = 0; j < BLOCK_SIZE; j++) {
mySum += in[start + j + i * width] * mul[j];
}
sdata[tid] = mySum;
__syncthreads();
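// Tree reduction in shared memory: the number of active threads is halved at every step until thread 0 holds the block sum.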
if ((BLOCK_SIZE >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((BLOCK_SIZE >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((BLOCK_SIZE >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((BLOCK_SIZE >= 2) && (tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
//printf("2 TID %d sum %f\n", i, mySum);
// write result for this block to global mem
//if (tid == 0) g_odata[blockIdx.x] = mySum
__shared__ float total;
if (tid == 0) total = mySum;
__syncthreads();
//if (tid == 0) printf("total is %f\n", total);
if (tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty] / total;
else
out[tx * width + ty] = 0.0f;
}
int main() {
//float *hA_in = (float *)malloc(SIZE * sizeof(float));
//float *hA_out = (float *)malloc(SIZE * sizeof(float));
//float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *hA_in, *hA_out, *hB_in;
float *dA_in, *dA_out, *dB_in;
hipHostMalloc((void**)&hA_in, SIZE * sizeof(float));
hipHostMalloc((void**)&hA_out, SIZE * sizeof(float));
hipHostMalloc((void**)&hB_in, BLOCK_SIZE * sizeof(float));
srand(2016);
for (int i = 0; i < SIZE; i++) {
hA_in[i] = (float)rand() / (float)RAND_MAX;
}
for (int i = 0; i < BLOCK_SIZE; i++) {
hB_in[i] = (float)rand() / (float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
hipDeviceSynchronize();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
normWithShflDown << <grid, block >> > (dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel time %fs\n", milliseconds);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
/*printf("\n");
for (int i = 0; i < SIZE; i++) {
printf("%d ", hA_out[i]);
if (i % 16 == 0) {
printf("\n");
}
}*/
}
|
76096e5c0c64f045224d691851d3f06210d3967c.cu
|
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include "device_launch_parameters.h"
// #include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 128
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width) {
for (int i = 0; i < GRID_SIZE; i++) {
for (int j = 0; j < GRID_SIZE; j++) {
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
sum += in[start + ii * width + jj] * mul[jj];
}
}
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
if (jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for (int i = 0; i < SIZE; i++) {
if (abs(ref[i] - out[i]) > 1.e-6) {
printf("Diff %f\n", abs(ref[i] - out[i]));
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
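// Note: despite the "ShflDown" in its name, this kernel performs the block reduction through shared memory rather than warp shuffle intrinsics.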
__global__ void normWithShflDown(float *in, float *out, float *mul, int width) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= width || ty >= SIZE / width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
__syncthreads();
// perform first level of reduction,
// reading from global memory, writing to shared memory
__shared__ float sdata[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
//unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
int i = threadIdx.x;
__syncthreads();
float mySum = 0;
if (i + BLOCK_SIZE < width)
for (int j = 0; j < BLOCK_SIZE; j++) {
mySum += in[start + j + i * width] * mul[j];
}
sdata[tid] = mySum;
__syncthreads();
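// Tree reduction in shared memory: the number of active threads is halved at every step until thread 0 holds the block sum.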
if ((BLOCK_SIZE >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((BLOCK_SIZE >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((BLOCK_SIZE >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((BLOCK_SIZE >= 2) && (tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
//printf("2 TID %d sum %f\n", i, mySum);
// write result for this block to global mem
//if (tid == 0) g_odata[blockIdx.x] = mySum
__shared__ float total;
if (tid == 0) total = mySum;
__syncthreads();
//if (tid == 0) printf("total is %f\n", total);
if (tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty] / total;
else
out[tx * width + ty] = 0.0f;
}
int main() {
//float *hA_in = (float *)malloc(SIZE * sizeof(float));
//float *hA_out = (float *)malloc(SIZE * sizeof(float));
//float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *hA_in, *hA_out, *hB_in;
float *dA_in, *dA_out, *dB_in;
cudaMallocHost((void**)&hA_in, SIZE * sizeof(float));
cudaMallocHost((void**)&hA_out, SIZE * sizeof(float));
cudaMallocHost((void**)&hB_in, BLOCK_SIZE * sizeof(float));
srand(2016);
for (int i = 0; i < SIZE; i++) {
hA_in[i] = (float)rand() / (float)RAND_MAX;
}
for (int i = 0; i < BLOCK_SIZE; i++) {
hB_in[i] = (float)rand() / (float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
cudaDeviceSynchronize();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
normWithShflDown << <grid, block >> > (dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel time %fs\n", milliseconds);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
/*printf("\n");
for (int i = 0; i < SIZE; i++) {
printf("%d ", hA_out[i]);
if (i % 16 == 0) {
printf("\n");
}
}*/
}
|
e56d598d7c7a8775116c36a37737c4332c79005e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
int *a,res;
res=hipMalloc(&a,sizeof(int));
printf("res=%d\n",res);
printf("%d\n",hipSuccess);
return 0;
}
|
e56d598d7c7a8775116c36a37737c4332c79005e.cu
|
#include <stdio.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
int *a,res;
res=cudaMalloc(&a,sizeof(int));
printf("res=%d\n",res);
printf("%d\n",cudaSuccess);
return 0;
}
|
d93b7e4338bbacfcf61a7941f821f4a1eca0bfc4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <cmath>
#include <stdio.h>
#include "GPUImage.h"
#include <string>
#include <thrust/extrema.h>
#include "projToneMapping.h"
using namespace tcs_cuda;
__global__
void rgb_to_xyY(float* d_r, float* d_g, float* d_b, float* d_x, float* d_y,
float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x){
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny){
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
__global__
void normalize_cdf(unsigned int* d_input_cdf, float* d_output_cdf, int n){
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n ){
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
__global__
void tonemap(float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm,
float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y,
float max_log_Y, int num_bins, int num_pixels_y, int num_pixels_x){
float log_Y_range = max_log_Y - min_log_Y;
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny ){
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
__global__
void init_array(const size_t numBins, unsigned int* array){
int blockSize = 1024;
int idx = threadIdx.x + blockSize * blockIdx.x;
if(idx < numBins){
array[idx] = 0;
}
}
__global__
void create_histogram(const float* const d_logLuminance, float min_logLum, const size_t numBins, float logLumRange, unsigned int* d_bins){
int blockSize = 1024;
int idx = threadIdx.x + blockSize * blockIdx.x;
unsigned int bin = min(static_cast<unsigned int>(numBins - 1),
static_cast<unsigned int>((d_logLuminance[idx] - min_logLum) / logLumRange * numBins));
atomicAdd(&(d_bins[bin]),1);
}
__global__
void find_min(const float* const inputMin, float* minLuminance, int n){
int blockSize = blockDim.x;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
int firstIdx = blockSize * blockIdx.x;
extern __shared__ float sdata[];
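// Threads indexing past the end of the input load the block's first element, so the padding cannot change the minimum.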
if(idx < n){
sdata[threadId] = inputMin[idx];
}else{
sdata[threadId] = inputMin[firstIdx];
}
__syncthreads();
for(int s = blockSize/2; s > 0; s >>=1){
if(threadId < s){
sdata[threadId] = min(sdata[threadId], sdata[threadId + s]);
}
__syncthreads();
}
if(threadId == 0){
minLuminance[blockIdx.x] = sdata[0];
}
}
__global__
void find_max(const float* const inputMax, float* maxLuminance, int n){
int blockSize = blockDim.x;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
int firstIdx = blockSize * blockIdx.x;
extern __shared__ float sdata[];
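// Threads indexing past the end of the input load the block's first element, so the padding cannot change the maximum.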
if(idx < n){
sdata[threadId] = inputMax[idx];
}else{
sdata[threadId] = inputMax[firstIdx];
}
__syncthreads();
for(int s = blockSize/2; s > 0; s >>=1){
if(threadId < s){
sdata[threadId] = max(sdata[threadId], sdata[threadId + s]);
}
__syncthreads();
}
if(threadId == 0){
maxLuminance[blockIdx.x] = sdata[0];
}
}
__global__
void prefix_sum(unsigned int* input, unsigned int* block_sum, const size_t n){
int blockSize = 1024;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
__shared__ float sdata[1024];
if(idx < n && threadId > 0){
sdata[threadId] = input[idx-1];
}else{
sdata[threadId] = 0;
}
__syncthreads();
//prefix sum
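// Hillis-Steele style inclusive scan over the shifted data (each thread loaded input[idx-1]), which yields an exclusive prefix sum of the original histogram bins.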
for (int shift = 1; shift < blockSize; shift <<= 1){
if (threadId < shift){
continue;
}
int tmp = sdata[threadId-shift];
__syncthreads();
sdata[threadId] += tmp ;
__syncthreads();
}
if(threadId + 1 == blockSize){// last element in the block
int last = input[idx];
for(int blockNo = blockIdx.x + 1; blockNo < gridDim.x; ++blockNo){
atomicAdd(&(block_sum[blockNo]),sdata[threadId] + last);
}
}
if(threadId < n){
input[idx] = sdata[threadId];
}
__syncthreads();
}
__global__
void create_cdf(unsigned int* input, unsigned int* block_sum, const size_t n, unsigned int* const d_cdf){
int blockSize = 1024;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
__shared__ int add;
if(threadId == 0){
if(blockIdx.x == 0){
add = 0;
}else{
add = block_sum[blockIdx.x];
}
}
__syncthreads();
if(idx < n){
d_cdf[idx] = add + input[idx];
}
}
__global__
void split_to_channels(float* imgPtr, float* d_red, float* d_green, float* d_blue, int numPixels){
int idX = (blockIdx.x * blockDim.x) + threadIdx.x;
int idY = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = gridDim.x * blockDim.x * idY + idX;
if(idx < numPixels){
d_blue[idx] = imgPtr[3 * idx + 0];
d_green[idx] = imgPtr[3 * idx + 1];
d_red[idx] = imgPtr[3 * idx + 2];
}
}
__global__
void recombine_channels(float* imgPtr, float* d_red, float* d_green, float* d_blue, int numPixels){
int idX = (blockIdx.x * blockDim.x) + threadIdx.x;
int idY = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = gridDim.x * blockDim.x * idY + idX;
if(idx < numPixels){
imgPtr[3 * idx + 0] = d_blue[idx];
imgPtr[3 * idx + 1] = d_green[idx];
imgPtr[3 * idx + 2] = d_red[idx];
}
}
void calculate_histogram_and_cdf(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
int totalSize = numRows * numCols;
//Step 1
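// (find the global min and max log-luminance with repeated block-wise reductions)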
int blockSize = 1024;
int blocksNum = totalSize;
float* d_output;
const float* d_input = d_logLuminance;
int n = totalSize;
hipMalloc((void**) &d_output, sizeof(float) * blocksNum);
do{
blocksNum = ceil(blocksNum / (blockSize * 1.0));
hipLaunchKernelGGL(( find_min), dim3(blocksNum),dim3(blockSize), blockSize * sizeof(float), 0, d_input, d_output, n);
d_input = d_output;
n = blocksNum;
}while(blocksNum > 1);
hipMemcpy(&min_logLum,d_output,sizeof(float), hipMemcpyDeviceToHost);
blocksNum = totalSize;
d_input = d_logLuminance;
n = totalSize;
do{
blocksNum = ceil(blocksNum / (blockSize * 1.0));
hipLaunchKernelGGL(( find_max), dim3(blocksNum),dim3(blockSize), blockSize * sizeof(float), 0, d_input, d_output, n);
d_input = d_output;
n = blocksNum;
}while(blocksNum > 1);
hipMemcpy(&max_logLum,d_output,sizeof(float), hipMemcpyDeviceToHost);
hipFree((void *)d_output);
//Step 2
float logLumRange = max_logLum - min_logLum;
//Step 3
unsigned int *d_bins;
hipMalloc((void**) &d_bins, sizeof(unsigned int) * numBins);
blocksNum = ceil(numBins / (blockSize * 1.0));
hipLaunchKernelGGL(( init_array), dim3(blocksNum),dim3(blockSize), 0, 0, numBins, d_bins);
blocksNum = ceil(totalSize / (blockSize * 1.0));
hipLaunchKernelGGL(( create_histogram), dim3(blocksNum),dim3(blockSize), 0, 0, d_logLuminance, min_logLum, numBins, logLumRange, d_bins);
//Step 4
unsigned int *block_sum;
hipMalloc((void**) &block_sum, sizeof(unsigned int) * numBins);
blocksNum = ceil(numBins / (blockSize * 1.0));
hipLaunchKernelGGL(( init_array), dim3(blocksNum),dim3(blockSize), 0, 0, numBins, block_sum);
hipLaunchKernelGGL(( prefix_sum), dim3(blocksNum),dim3(blockSize), 0, 0, d_bins, block_sum, numBins);
hipLaunchKernelGGL(( create_cdf), dim3(blocksNum),dim3(blockSize), 0, 0, d_bins, block_sum, numBins, d_cdf);
hipFree((void *)d_bins);
hipFree((void *)block_sum);
}
GPUImage hdrTransform(const GPUImage& input) {
return GPUImage::createEmptyHDR(input.getWidth(), input.getHeight());
}
void projTonemapping(const GPUImage& input, GPUImage& output){
int numBins = 1024;
unsigned int* d_cdf;
size_t cdf_size = sizeof(unsigned int) * numBins;
checkCudaErrors(hipMalloc(&d_cdf, cdf_size));
checkCudaErrors(hipMemset(d_cdf, 0, cdf_size));
size_t numRows = input.getHeight();
size_t numCols = input.getWidth();
//first thing to do is split incoming BGR float data into separate channels
size_t numPixels = numRows * numCols;
int blockX = 32;
int blockY = 16;
int blockZ = 1;
const dim3 blockSize(blockX, blockY, blockZ);
int gridX = (numCols + blockSize.x - 1) / blockSize.x;//ceil(numCols / (blockX * 1.0));
int gridY = (numRows + blockSize.y - 1) / blockSize.y;//ceil(numRows / (blockY * 1.0));
int gridZ = 1;
const dim3 gridSize(gridX, gridY, gridZ);
//RGB space
float *d_red, *d_green, *d_blue;
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(hipMalloc(&d_red, channelSize));
checkCudaErrors(hipMalloc(&d_green, channelSize));
checkCudaErrors(hipMalloc(&d_blue, channelSize));
hipLaunchKernelGGL(( split_to_channels), dim3(gridSize), dim3(blockSize), 0, 0, input.getDeviceHDRPixels(), d_red, d_green, d_blue, numPixels);
//chroma-LogLuminance Space
float *d_x, *d_y, *d_luminance;
checkCudaErrors(hipMalloc(&d_x, channelSize));
checkCudaErrors(hipMalloc(&d_y, channelSize));
checkCudaErrors(hipMalloc(&d_luminance, channelSize));
//convert from RGB space to chrominance/luminance space xyY
hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue,d_x, d_y, d_luminance,
.0001f, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
float min_logLum = 0.f;
float max_logLum = 1.f;
//call the students' code
calculate_histogram_and_cdf(d_luminance, d_cdf, min_logLum, max_logLum, numRows, numCols, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//check results and output the tone-mapped image
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins));
//first normalize the cdf to a maximum value of 1
//this is how we compress the range of the luminance channel
hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads),
dim3( numThreads), 0, 0, d_cdf, d_cdf_normalized, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//next perform the actual tone-mapping
//we map each luminance value to its new value
//and then transform back to RGB space
hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x, d_y, d_luminance, d_cdf_normalized,
d_red, d_green, d_blue, min_logLum, max_logLum,
numBins, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( recombine_channels), dim3(gridSize), dim3(blockSize), 0, 0, output.getDeviceHDRPixels(), d_red, d_green, d_blue, numPixels);
//cleanup
checkCudaErrors(hipFree(d_cdf));
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_y));
checkCudaErrors(hipFree(d_luminance));
checkCudaErrors(hipFree(d_cdf_normalized));
}
|
d93b7e4338bbacfcf61a7941f821f4a1eca0bfc4.cu
|
#include "utils.h"
#include <cmath>
#include <stdio.h>
#include "GPUImage.h"
#include <string>
#include <thrust/extrema.h>
#include "projToneMapping.h"
using namespace tcs_cuda;
__global__
void rgb_to_xyY(float* d_r, float* d_g, float* d_b, float* d_x, float* d_y,
float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x){
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny){
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
__global__
void normalize_cdf(unsigned int* d_input_cdf, float* d_output_cdf, int n){
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n ){
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
__global__
void tonemap(float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm,
float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y,
float max_log_Y, int num_bins, int num_pixels_y, int num_pixels_x){
float log_Y_range = max_log_Y - min_log_Y;
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny ){
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
__global__
void init_array(const size_t numBins, unsigned int* array){
int blockSize = 1024;
int idx = threadIdx.x + blockSize * blockIdx.x;
if(idx < numBins){
array[idx] = 0;
}
}
__global__
void create_histogram(const float* const d_logLuminance, float min_logLum, const size_t numBins, float logLumRange, unsigned int* d_bins){
int blockSize = 1024;
int idx = threadIdx.x + blockSize * blockIdx.x;
unsigned int bin = min(static_cast<unsigned int>(numBins - 1),
static_cast<unsigned int>((d_logLuminance[idx] - min_logLum) / logLumRange * numBins));
atomicAdd(&(d_bins[bin]),1);
}
__global__
void find_min(const float* const inputMin, float* minLuminance, int n){
int blockSize = blockDim.x;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
int firstIdx = blockSize * blockIdx.x;
extern __shared__ float sdata[];
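// Threads indexing past the end of the input load the block's first element, so the padding cannot change the minimum.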
if(idx < n){
sdata[threadId] = inputMin[idx];
}else{
sdata[threadId] = inputMin[firstIdx];
}
__syncthreads();
for(int s = blockSize/2; s > 0; s >>=1){
if(threadId < s){
sdata[threadId] = min(sdata[threadId], sdata[threadId + s]);
}
__syncthreads();
}
if(threadId == 0){
minLuminance[blockIdx.x] = sdata[0];
}
}
__global__
void find_max(const float* const inputMax, float* maxLuminance, int n){
int blockSize = blockDim.x;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
int firstIdx = blockSize * blockIdx.x;
extern __shared__ float sdata[];
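// Threads indexing past the end of the input load the block's first element, so the padding cannot change the maximum.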
if(idx < n){
sdata[threadId] = inputMax[idx];
}else{
sdata[threadId] = inputMax[firstIdx];
}
__syncthreads();
for(int s = blockSize/2; s > 0; s >>=1){
if(threadId < s){
sdata[threadId] = max(sdata[threadId], sdata[threadId + s]);
}
__syncthreads();
}
if(threadId == 0){
maxLuminance[blockIdx.x] = sdata[0];
}
}
__global__
void prefix_sum(unsigned int* input, unsigned int* block_sum, const size_t n){
int blockSize = 1024;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
__shared__ float sdata[1024];
if(idx < n && threadId > 0){
sdata[threadId] = input[idx-1];
}else{
sdata[threadId] = 0;
}
__syncthreads();
//prefix sum
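// Hillis-Steele style inclusive scan over the shifted data (each thread loaded input[idx-1]), which yields an exclusive prefix sum of the original histogram bins.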
for (int shift = 1; shift < blockSize; shift <<= 1){
if (threadId < shift){
continue;
}
int tmp = sdata[threadId-shift];
__syncthreads();
sdata[threadId] += tmp ;
__syncthreads();
}
if(threadId + 1 == blockSize){// last element in the block
int last = input[idx];
for(int blockNo = blockIdx.x + 1; blockNo < gridDim.x; ++blockNo){
atomicAdd(&(block_sum[blockNo]),sdata[threadId] + last);
}
}
if(threadId < n){
input[idx] = sdata[threadId];
}
__syncthreads();
}
__global__
void create_cdf(unsigned int* input, unsigned int* block_sum, const size_t n, unsigned int* const d_cdf){
int blockSize = 1024;
int threadId = threadIdx.x;
int idx = threadIdx.x + blockSize * blockIdx.x;
__shared__ int add;
if(threadId == 0){
if(blockIdx.x == 0){
add = 0;
}else{
add = block_sum[blockIdx.x];
}
}
__syncthreads();
if(idx < n){
d_cdf[idx] = add + input[idx];
}
}
__global__
void split_to_channels(float* imgPtr, float* d_red, float* d_green, float* d_blue, int numPixels){
int idX = (blockIdx.x * blockDim.x) + threadIdx.x;
int idY = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = gridDim.x * blockDim.x * idY + idX;
if(idx < numPixels){
d_blue[idx] = imgPtr[3 * idx + 0];
d_green[idx] = imgPtr[3 * idx + 1];
d_red[idx] = imgPtr[3 * idx + 2];
}
}
__global__
void recombine_channels(float* imgPtr, float* d_red, float* d_green, float* d_blue, int numPixels){
int idX = (blockIdx.x * blockDim.x) + threadIdx.x;
int idY = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = gridDim.x * blockDim.x * idY + idX;
if(idx < numPixels){
imgPtr[3 * idx + 0] = d_blue[idx];
imgPtr[3 * idx + 1] = d_green[idx];
imgPtr[3 * idx + 2] = d_red[idx];
}
}
void calculate_histogram_and_cdf(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
int totalSize = numRows * numCols;
//Step 1
int blockSize = 1024;
int blocksNum = totalSize;
float* d_output;
const float* d_input = d_logLuminance;
int n = totalSize;
cudaMalloc((void**) &d_output, sizeof(float) * blocksNum);
do{
blocksNum = ceil(blocksNum / (blockSize * 1.0));
find_min<<<blocksNum,blockSize, blockSize * sizeof(float)>>>(d_input, d_output, n);
d_input = d_output;
n = blocksNum;
}while(blocksNum > 1);
cudaMemcpy(&min_logLum,d_output,sizeof(float), cudaMemcpyDeviceToHost);
blocksNum = totalSize;
d_input = d_logLuminance;
n = totalSize;
do{
blocksNum = ceil(blocksNum / (blockSize * 1.0));
find_max<<<blocksNum,blockSize, blockSize * sizeof(float)>>>(d_input, d_output, n);
d_input = d_output;
n = blocksNum;
}while(blocksNum > 1);
cudaMemcpy(&max_logLum,d_output,sizeof(float), cudaMemcpyDeviceToHost);
cudaFree((void *)d_output);
//Step 2
float logLumRange = max_logLum - min_logLum;
//Step 3
unsigned int *d_bins;
cudaMalloc((void**) &d_bins, sizeof(unsigned int) * numBins);
blocksNum = ceil(numBins / (blockSize * 1.0));
init_array<<<blocksNum,blockSize>>>(numBins, d_bins);
blocksNum = ceil(totalSize / (blockSize * 1.0));
create_histogram<<<blocksNum,blockSize>>>(d_logLuminance, min_logLum, numBins, logLumRange, d_bins, totalSize);
//Step 4
unsigned int *block_sum;
cudaMalloc((void**) &block_sum, sizeof(unsigned int) * numBins);
blocksNum = ceil(numBins / (blockSize * 1.0));
init_array<<<blocksNum,blockSize>>>(numBins, block_sum);
prefix_sum<<<blocksNum,blockSize>>>(d_bins, block_sum, numBins);
create_cdf<<<blocksNum,blockSize>>>(d_bins, block_sum, numBins, d_cdf);
cudaFree((void *)d_bins);
cudaFree((void *)block_sum);
}
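// Summary of the pipeline above (descriptive only): step 1 reduces the
// luminance channel to min_logLum and max_logLum, step 2 forms
// logLumRange = max_logLum - min_logLum, step 3 histograms the luminance with
// bin = clamp((lum - min_logLum) / logLumRange * numBins, 0, numBins - 1),
// and step 4 converts the histogram into an exclusive cumulative distribution,
// d_cdf[i] = hist[0] + ... + hist[i-1] (so d_cdf[0] = 0), which the
// tone-mapping stage below normalizes and uses to remap luminance values.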
GPUImage hdrTransform(const GPUImage& input) {
return GPUImage::createEmptyHDR(input.getWidth(), input.getHeight());
}
void projTonemapping(const GPUImage& input, GPUImage& output){
int numBins = 1024;
unsigned int* d_cdf;
size_t cdf_size = sizeof(unsigned int) * numBins;
checkCudaErrors(cudaMalloc(&d_cdf, cdf_size));
checkCudaErrors(cudaMemset(d_cdf, 0, cdf_size));
size_t numRows = input.getHeight();
size_t numCols = input.getWidth();
//first thing to do is split incoming BGR float data into separate channels
size_t numPixels = numRows * numCols;
int blockX = 32;
int blockY = 16;
int blockZ = 1;
const dim3 blockSize(blockX, blockY, blockZ);
int gridX = (numCols + blockSize.x - 1) / blockSize.x;//ceil(numCols / (blockX * 1.0));
int gridY = (numRows + blockSize.y - 1) / blockSize.y;//ceil(numRows / (blockY * 1.0));
int gridZ = 1;
const dim3 gridSize(gridX, gridY, gridZ);
//RGB space
float *d_red, *d_green, *d_blue;
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(cudaMalloc(&d_red, channelSize));
checkCudaErrors(cudaMalloc(&d_green, channelSize));
checkCudaErrors(cudaMalloc(&d_blue, channelSize));
split_to_channels<<<gridSize, blockSize>>>(input.getDeviceHDRPixels(), d_red, d_green, d_blue, numPixels);
//chroma-LogLuminance Space
float *d_x, *d_y, *d_luminance;
checkCudaErrors(cudaMalloc(&d_x, channelSize));
checkCudaErrors(cudaMalloc(&d_y, channelSize));
checkCudaErrors(cudaMalloc(&d_luminance, channelSize));
//convert from RGB space to chrominance/luminance space xyY
rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue,d_x, d_y, d_luminance,
.0001f, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
float min_logLum = 0.f;
float max_logLum = 1.f;
//call the students' code
calculate_histogram_and_cdf(d_luminance, d_cdf, min_logLum, max_logLum, numRows, numCols, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//check results and output the tone-mapped image
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins));
//first normalize the cdf to a maximum value of 1
//this is how we compress the range of the luminance channel
normalize_cdf<<< (numBins + numThreads - 1) / numThreads,
numThreads>>>(d_cdf, d_cdf_normalized, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//next perform the actual tone-mapping
//we map each luminance value to its new value
//and then transform back to RGB space
tonemap<<<gridSize, blockSize>>>(d_x, d_y, d_luminance, d_cdf_normalized,
d_red, d_green, d_blue, min_logLum, max_logLum,
numBins, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
recombine_channels<<<gridSize, blockSize>>>(output.getDeviceHDRPixels(), d_red, d_green, d_blue, numPixels);
//cleanup
checkCudaErrors(cudaFree(d_cdf));
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
checkCudaErrors(cudaFree(d_luminance));
checkCudaErrors(cudaFree(d_cdf_normalized));
}
|
98e3732045f184eb1b5cfe0ef178160ab2a95a2d.hip
|
// !!! This is a file automatically generated by hipify!!!
// LBM Code for 2 - D, diffusion problems, D2Q4
// Adapted from the book Lattice Boltzmann Method - Fundamentals
// and Engineering Applications with Computer Codes by A. Mohamad
// Output file can be opened by the free software ParaView
// Eric Tada, April 24th, 2019
#include "stdio.h"
#include "math.h"
#include "stdlib.h"
#include <chrono>
#include "string.h"
#include "hip/hip_runtime.h"
#define m 100 //m is the number of lattice nodes (y)
#define n 100 //n is the number of lattice nodes (x)
// Collision kernel, done in parallel
__global__ void collision(float *f1, float *f2, float *f3, float *f4, float *rho, float *omega) {
int tid = blockDim.x * blockIdx.x + threadIdx.x + (blockDim.y * blockIdx.y + threadIdx.y) * (n+1);
float feq = 0.25*rho[tid];
f1[tid] = omega[0]*feq + (1.0 - omega[0])*f1[tid];
f2[tid] = omega[0]*feq + (1.0 - omega[0])*f2[tid];
f3[tid] = omega[0]*feq + (1.0 - omega[0])*f3[tid];
f4[tid] = omega[0]*feq + (1.0 - omega[0])*f4[tid];
}
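// BGK-style relaxation for the D2Q4 diffusion model: all four distribution
// functions relax toward the same equilibrium feq = rho / 4 (the four lattice
// directions carry equal weights), i.e. f_k <- omega * feq + (1 - omega) * f_k.
// omega is the relaxation frequency computed on the host from the diffusivity
// alpha as omega = 1 / (2 * alpha / (dt * csq) + 0.5).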
// Streaming kernel for f1 and f2, done in series for each row
__global__ void streaming12(float *f1, float *f2) {
int j = blockDim.y * blockIdx.y + threadIdx.y;
for (int i = 1; i <= n; i++) {
f1[j*(n + 1) + n - i] = f1[j*(n + 1) + n - i - 1];
f2[j*(n+1) + i - 1] = f2[j*(n + 1) + i];
}
}
// Streaming kernel for f3 and f4, done in series for each column
__global__ void streaming34(float *f3, float *f4) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
for (int j = 1; j <= m; j++) {
f3[(m-j)*(n + 1) + i] = f3[(m - j - 1)*(n + 1) + i];
f4[(j - 1)*(n + 1) + i] = f4[(j)*(n + 1) + i];
}
}
// Kernel to apply boundary conditions (1)
__global__ void bound1(float *f1, float *f2, float *f3, float *f4) {
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (j == 0 || j == n) {
}
else {
f1[j*(n + 1)] = 0.5 - f2[j*(n + 1)];
f3[j*(n + 1)] = 0.5 - f4[j*(n + 1)];
f1[j*(n + 1) + n] = 0.0;
f2[j*(n + 1) + n] = 0.0;
f3[j*(n + 1) + n] = 0.0;
f4[j*(n + 1) + n] = 0.0;
}
}
// Kernel to apply boundary conditions (2)
__global__ void bound2(float *f1, float *f2, float *f3, float *f4) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i == 0 || i == n) {
}
else {
f1[m*(n + 1) + i] = 0.0;
f2[m*(n + 1) + i] = 0.0;
f3[m*(n + 1) + i] = 0.0;
f4[m*(n + 1) + i] = 0.0;
f1[i] = f1[n + 1 + i];
f2[i] = f2[n + 1 + i];
f3[i] = f3[n + 1 + i];
f4[i] = f4[n + 1 + i];
}
}
// Kernel to update rho value
__global__ void update(float *f1, float *f2, float *f3, float *f4, float *rho) {
int tid = blockDim.x * blockIdx.x + threadIdx.x + (blockDim.y * blockIdx.y + threadIdx.y) * (n + 1);
rho[tid] = f1[tid] + f2[tid] + f3[tid] + f4[tid];
}
// CPU function to output results
void print(float x[n + 1], float y[m + 1], float rho[(n + 1)*(m + 1)], int step) {
char str[20];
sprintf(str, "step%06d.vtk", step);
FILE *res;
res = fopen(str, "w");
int i, j;
fprintf(res, "# vtk DataFile Version 3.0\r\nvtk output\r\nASCII\r\nDATASET RECTILINEAR_GRID\r\nDIMENSIONS %d %d 1\r\n\r\n", n + 1, m + 1);
fprintf(res, "X_COORDINATES %d float\r\n", n + 1);
for (i = 0; i <= n; i++) { fprintf(res, "%f ", x[i]); }
fprintf(res, "\r\nY_COORDINATES %d float\r\n", m + 1);
for (j = 0; j <= m; j++) { fprintf(res, "%f ", y[j]); }
fprintf(res, "\r\nZ_COORDINATES 1 float\r\n0\r\n\r\n");
fprintf(res, "POINT_DATA %d\r\n", (n + 1)*(m + 1));
fprintf(res, "FIELD FieldData 1\r\nv 1 %d float\r\n", (n + 1)*(m + 1));
for (j = 0; j <= n; j++) {
for (i = 0; i <= m; i++) {
fprintf(res, "%f ", rho[j*(n+1) + i]);
}
fprintf(res, "\r\n");
}
fclose(res);
}
int main() {
float f1[(n + 1)*(m + 1)], f2[(n + 1)*(m + 1)], f3[(n + 1)*(m + 1)], f4[(n + 1)*(m + 1)];
float rho[(n + 1)*(m + 1)], x[n + 1], y[m + 1];
int i, j;
float dx = 1.0;
float dy = dx;
float dt = 1.0;
x[0] = 0.0;
y[0] = 0.0;
for (i = 1; i <= n; i++) {
x[i] = x[i - 1] + dx;
}
for (j = 1; j <= m; j++) {
y[j] = y[j - 1] + dy;
}
float csq = dx*dx / (dt*dt);
float alpha = 0.25;
float omega[1];
omega[0] = 1.0 / (2.*alpha / (dt*csq) + 0.5);
float mstep = 4000;
for (j = 0; j <= m; j++) {
for (i = 0; i <= n; i++) {
rho[j*(n+1) + i] = 0.0; //initial values of the dependent variable
}
}
for (j = 0; j <= m; j++) {
for (i = 0; i <= n; i++) {
f1[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
f2[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
f3[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
f4[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
}
}
print(x, y, rho, 0);
// Create GPU variables
float *d_f1, *d_f2, *d_f3, *d_f4, *d_rho, *d_omega;
// Allocate memory to GPU
hipMalloc((void**)&d_f1, ((n+1)*(m+1)) * sizeof(float));
hipMalloc((void**)&d_f2, ((n + 1)*(m + 1)) * sizeof(float));
hipMalloc((void**)&d_f3, ((n + 1)*(m + 1)) * sizeof(float));
hipMalloc((void**)&d_f4, ((n + 1)*(m + 1)) * sizeof(float));
hipMalloc((void**)&d_rho, ((n + 1)*(m + 1)) * sizeof(float));
hipMalloc((void**)&d_omega, (1 * sizeof(float)));
dim3 blocksij(n+1, m+1, 1), threads(1, 1, 1);
dim3 blocksi(n + 1, 1, 1);
dim3 blocksj(1, m + 1, 1);
// Copy from host to device
hipMemcpy(d_f1, f1, (n+1)*(m+1)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_f2, f2, (n + 1)*(m + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_f3, f3, (n + 1)*(m + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_f4, f4, (n + 1)*(m + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rho, rho, (n + 1)*(m + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_omega, omega, sizeof(float), hipMemcpyHostToDevice);
// Record start time
auto start = std::chrono::high_resolution_clock::now();
for (int kk = 1; kk <= mstep; kk++) {
//collision
collision << < blocksij, threads >> > (d_f1, d_f2, d_f3, d_f4, d_rho, d_omega);
//streaming
streaming12 << <blocksj, threads >> > (d_f1, d_f2);
streaming34 << <blocksi, threads >> > (d_f3, d_f4);
//boundary conditions
bound1 << <blocksj, threads >> > (d_f1, d_f2, d_f3, d_f4);
bound2 << <blocksi, threads >> > (d_f1, d_f2, d_f3, d_f4);
//update rho
update << < blocksij, threads >> > (d_f1, d_f2, d_f3, d_f4, d_rho);
//output result
if (kk % 20 == 0) {
hipMemcpy(rho, d_rho, (n + 1)*(m + 1) * sizeof(float), hipMemcpyDeviceToHost);
print(x, y, rho, kk);
}
}
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
printf("Elapsed time for LBM: %f s\n", elapsed.count());
getchar();
}
|
98e3732045f184eb1b5cfe0ef178160ab2a95a2d.cu
|
// LBM Code for 2 - D, diffusion problems, D2Q4
// Adapted from the book Lattice Boltzmann Method - Fundamentals
// and Engineering Applications with Computer Codes by A. Mohamad
// Output file can be opened by the free software ParaView
// Eric Tada, April 24th, 2019
#include "stdio.h"
#include "math.h"
#include "stdlib.h"
#include <chrono>
#include "string.h"
#include "cuda_runtime.h"
#define m 100 //m is the number of lattice nodes (y)
#define n 100 //n is the number of lattice nodes (x)
// Collision kernel, done in parallel
__global__ void collision(float *f1, float *f2, float *f3, float *f4, float *rho, float *omega) {
int tid = blockDim.x * blockIdx.x + threadIdx.x + (blockDim.y * blockIdx.y + threadIdx.y) * (n+1);
float feq = 0.25*rho[tid];
f1[tid] = omega[0]*feq + (1.0 - omega[0])*f1[tid];
f2[tid] = omega[0]*feq + (1.0 - omega[0])*f2[tid];
f3[tid] = omega[0]*feq + (1.0 - omega[0])*f3[tid];
f4[tid] = omega[0]*feq + (1.0 - omega[0])*f4[tid];
}
// Streaming kernel for f1 and f2, done in series for each row
__global__ void streaming12(float *f1, float *f2) {
int j = blockDim.y * blockIdx.y + threadIdx.y;
for (int i = 1; i <= n; i++) {
f1[j*(n + 1) + n - i] = f1[j*(n + 1) + n - i - 1];
f2[j*(n+1) + i - 1] = f2[j*(n + 1) + i];
}
}
// Streaming kernel for f3 and f4, done in series for each column
__global__ void streaming34(float *f3, float *f4) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
for (int j = 1; j <= m; j++) {
f3[(m-j)*(n + 1) + i] = f3[(m - j - 1)*(n + 1) + i];
f4[(j - 1)*(n + 1) + i] = f4[(j)*(n + 1) + i];
}
}
// Kernel to apply boundary conditions (1)
__global__ void bound1(float *f1, float *f2, float *f3, float *f4) {
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (j == 0 || j == n) {
}
else {
f1[j*(n + 1)] = 0.5 - f2[j*(n + 1)];
f3[j*(n + 1)] = 0.5 - f4[j*(n + 1)];
f1[j*(n + 1) + n] = 0.0;
f2[j*(n + 1) + n] = 0.0;
f3[j*(n + 1) + n] = 0.0;
f4[j*(n + 1) + n] = 0.0;
}
}
// Kernel to apply boundary conditions (2)
__global__ void bound2(float *f1, float *f2, float *f3, float *f4) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i == 0 || i == n) {
}
else {
f1[m*(n + 1) + i] = 0.0;
f2[m*(n + 1) + i] = 0.0;
f3[m*(n + 1) + i] = 0.0;
f4[m*(n + 1) + i] = 0.0;
f1[i] = f1[n + 1 + i];
f2[i] = f2[n + 1 + i];
f3[i] = f3[n + 1 + i];
f4[i] = f4[n + 1 + i];
}
}
// Kernel to update rho value
__global__ void update(float *f1, float *f2, float *f3, float *f4, float *rho) {
int tid = blockDim.x * blockIdx.x + threadIdx.x + (blockDim.y * blockIdx.y + threadIdx.y) * (n + 1);
rho[tid] = f1[tid] + f2[tid] + f3[tid] + f4[tid];
}
// CPU function to output results
void print(float x[n + 1], float y[m + 1], float rho[(n + 1)*(m + 1)], int step) {
char str[20];
sprintf(str, "step%06d.vtk", step);
FILE *res;
res = fopen(str, "w");
int i, j;
fprintf(res, "# vtk DataFile Version 3.0\r\nvtk output\r\nASCII\r\nDATASET RECTILINEAR_GRID\r\nDIMENSIONS %d %d 1\r\n\r\n", n + 1, m + 1);
fprintf(res, "X_COORDINATES %d float\r\n", n + 1);
for (i = 0; i <= n; i++) { fprintf(res, "%f ", x[i]); }
fprintf(res, "\r\nY_COORDINATES %d float\r\n", m + 1);
for (j = 0; j <= m; j++) { fprintf(res, "%f ", y[j]); }
fprintf(res, "\r\nZ_COORDINATES 1 float\r\n0\r\n\r\n");
fprintf(res, "POINT_DATA %d\r\n", (n + 1)*(m + 1));
fprintf(res, "FIELD FieldData 1\r\nv 1 %d float\r\n", (n + 1)*(m + 1));
for (j = 0; j <= n; j++) {
for (i = 0; i <= m; i++) {
fprintf(res, "%f ", rho[j*(n+1) + i]);
}
fprintf(res, "\r\n");
}
fclose(res);
}
int main() {
float f1[(n + 1)*(m + 1)], f2[(n + 1)*(m + 1)], f3[(n + 1)*(m + 1)], f4[(n + 1)*(m + 1)];
float rho[(n + 1)*(m + 1)], x[n + 1], y[m + 1];
int i, j;
float dx = 1.0;
float dy = dx;
float dt = 1.0;
x[0] = 0.0;
y[0] = 0.0;
for (i = 1; i <= n; i++) {
x[i] = x[i - 1] + dx;
}
for (j = 1; j <= m; j++) {
y[j] = y[j - 1] + dy;
}
float csq = dx*dx / (dt*dt);
float alpha = 0.25;
float omega[1];
omega[0] = 1.0 / (2.*alpha / (dt*csq) + 0.5);
float mstep = 4000;
for (j = 0; j <= m; j++) {
for (i = 0; i <= n; i++) {
rho[j*(n+1) + i] = 0.0; //initial values of the dependent variable
}
}
for (j = 0; j <= m; j++) {
for (i = 0; i <= n; i++) {
f1[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
f2[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
f3[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
f4[j*(n + 1) + i] = 0.25*rho[j*(n + 1) + i];
}
}
print(x, y, rho, 0);
// Create GPU variables
float *d_f1, *d_f2, *d_f3, *d_f4, *d_rho, *d_omega;
// Allocate memory to GPU
cudaMalloc((void**)&d_f1, ((n+1)*(m+1)) * sizeof(float));
cudaMalloc((void**)&d_f2, ((n + 1)*(m + 1)) * sizeof(float));
cudaMalloc((void**)&d_f3, ((n + 1)*(m + 1)) * sizeof(float));
cudaMalloc((void**)&d_f4, ((n + 1)*(m + 1)) * sizeof(float));
cudaMalloc((void**)&d_rho, ((n + 1)*(m + 1)) * sizeof(float));
cudaMalloc((void**)&d_omega, (1 * sizeof(float)));
dim3 blocksij(n+1, m+1, 1), threads(1, 1, 1);
dim3 blocksi(n + 1, 1, 1);
dim3 blocksj(1, m + 1, 1);
// Copy from host to device
cudaMemcpy(d_f1, f1, (n+1)*(m+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_f2, f2, (n + 1)*(m + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_f3, f3, (n + 1)*(m + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_f4, f4, (n + 1)*(m + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rho, rho, (n + 1)*(m + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_omega, omega, sizeof(float), cudaMemcpyHostToDevice);
// Record start time
auto start = std::chrono::high_resolution_clock::now();
for (int kk = 1; kk <= mstep; kk++) {
//collision
collision << < blocksij, threads >> > (d_f1, d_f2, d_f3, d_f4, d_rho, d_omega);
//streaming
streaming12 << <blocksj, threads >> > (d_f1, d_f2);
streaming34 << <blocksi, threads >> > (d_f3, d_f4);
//boundary conditions
bound1 << <blocksj, threads >> > (d_f1, d_f2, d_f3, d_f4);
bound2 << <blocksi, threads >> > (d_f1, d_f2, d_f3, d_f4);
//update rho
update << < blocksij, threads >> > (d_f1, d_f2, d_f3, d_f4, d_rho);
//output result
if (kk % 20 == 0) {
cudaMemcpy(rho, d_rho, (n + 1)*(m + 1) * sizeof(float), cudaMemcpyDeviceToHost);
print(x, y, rho, kk);
}
}
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
printf("Elapsed time for LBM: %f s\n", elapsed.count());
getchar();
}
|
1646a24b4155b56a8fa67b55a02391e1ef012755.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* \file
* \brief Initializes and calculates required GPU memory
*/
#include "gpu_memory.cuh"
#ifdef GENERATE_DOCS
//put this in the Oregonator namespace for documentation
namespace oregonator_cu {
#endif
/**
* \brief Calculates and returns the total memory size (in bytes) required by an individual thread for the
* mechanism_memory struct.
*/
size_t required_mechanism_size() {
//returns the total required size for the mechanism per thread
size_t mech_size = 0;
//state vector y
mech_size += NSP;
//dydt vector
mech_size += NSP;
//Jacobian
mech_size += NSP * NSP;
//and mu parameter
mech_size += 1;
return mech_size * sizeof(double);
}
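/* Illustrative arithmetic (NSP is defined by the generated mechanism header;
 * the value 3 below is only an example): with NSP = 3 a single thread needs
 * 3 (y) + 3 (dy) + 3 * 3 (Jacobian) + 1 (mu parameter) = 16 doubles,
 * i.e. 128 bytes.
 */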
/**
* \brief Initializes the host and device mechanism_memory structs. This is required in order to enable
* passing the struct to CUDA
* \param[in] padded The padded number of threads to be used by the CUDA solver
* \param[in,out] h_mem The host version of the mechanism_memory struct to initialize.
* \param[in,out] d_mem The device version of the mechanism_memory struct to copy the resulting host mechanism_memory struct to.
*/
void initialize_gpu_memory(int padded, mechanism_memory** h_mem, mechanism_memory** d_mem)
{
// Allocate storage for the device struct
cudaErrorCheck( hipMalloc(d_mem, sizeof(mechanism_memory)) );
//allocate the device arrays on the host pointer
cudaErrorCheck( hipMalloc(&((*h_mem)->y), NSP * padded * sizeof(double)) );
cudaErrorCheck( hipMalloc(&((*h_mem)->dy), NSP * padded * sizeof(double)) );
cudaErrorCheck( hipMalloc(&((*h_mem)->var), 1 * padded * sizeof(double)) );
cudaErrorCheck( hipMalloc(&((*h_mem)->jac), NSP * NSP * padded * sizeof(double)) );
// set non-initialized values to zero
cudaErrorCheck( hipMemset((*h_mem)->dy, 0, NSP * padded * sizeof(double)) );
cudaErrorCheck( hipMemset((*h_mem)->jac, 0, NSP * NSP * padded * sizeof(double)) );
// and copy to device pointer
cudaErrorCheck( hipMemcpy(*d_mem, *h_mem, sizeof(mechanism_memory), hipMemcpyHostToDevice) );
}
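/* Hedged usage sketch (not part of the generated file; exact driver code
 * varies): the struct itself lives on the host while its members point at
 * device arrays, so a typical call sequence is
 *
 *   mechanism_memory* h_mem = (mechanism_memory*) malloc(sizeof(mechanism_memory));
 *   mechanism_memory* d_mem = NULL;
 *   initialize_gpu_memory(padded, &h_mem, &d_mem);
 *   // ... launch solver kernels that take d_mem ...
 *   free_gpu_memory(&h_mem, &d_mem);
 *   free(h_mem);
 *
 * h_mem is the host mirror holding device pointers and d_mem is the device
 * copy of that struct.
 */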
/**
* \brief Frees the host and device mechanism_memory structs
* \param[in,out] h_mem The host version of the mechanism_memory struct.
* \param[in,out] d_mem The device version of the mechanism_memory struct.
*/
void free_gpu_memory(mechanism_memory** h_mem, mechanism_memory** d_mem)
{
cudaErrorCheck(hipFree((*h_mem)->y));
cudaErrorCheck(hipFree((*h_mem)->dy));
cudaErrorCheck(hipFree((*h_mem)->var));
cudaErrorCheck(hipFree((*h_mem)->jac));
cudaErrorCheck(hipFree(*d_mem));
}
#ifdef GENERATE_DOCS
}
#endif
|
1646a24b4155b56a8fa67b55a02391e1ef012755.cu
|
/*!
* \file
* \brief Initializes and calculates required GPU memory
*/
#include "gpu_memory.cuh"
#ifdef GENERATE_DOCS
//put this in the Oregonator namespace for documentation
namespace oregonator_cu {
#endif
/**
* \brief Calculates and returns the total memory size (in bytes) required by an individual thread for the
* mechanism_memory struct.
*/
size_t required_mechanism_size() {
//returns the total required size for the mechanism per thread
size_t mech_size = 0;
//state vector y
mech_size += NSP;
//dydt vector
mech_size += NSP;
//Jacobian
mech_size += NSP * NSP;
//and mu parameter
mech_size += 1;
return mech_size * sizeof(double);
}
/**
* \brief Initializes the host and device mechanism_memory structs. This is required in order to enable
* passing the struct to CUDA
* \param[in] padded The padded number of threads to be used by the CUDA solver
* \param[in,out] h_mem The host version of the mechanism_memory struct to initialize.
* \param[in,out] d_mem The device version of the mechanism_memory struct to copy the resulting host mechanism_memory struct to.
*/
void initialize_gpu_memory(int padded, mechanism_memory** h_mem, mechanism_memory** d_mem)
{
// Allocate storage for the device struct
cudaErrorCheck( cudaMalloc(d_mem, sizeof(mechanism_memory)) );
//allocate the device arrays on the host pointer
cudaErrorCheck( cudaMalloc(&((*h_mem)->y), NSP * padded * sizeof(double)) );
cudaErrorCheck( cudaMalloc(&((*h_mem)->dy), NSP * padded * sizeof(double)) );
cudaErrorCheck( cudaMalloc(&((*h_mem)->var), 1 * padded * sizeof(double)) );
cudaErrorCheck( cudaMalloc(&((*h_mem)->jac), NSP * NSP * padded * sizeof(double)) );
// set non-initialized values to zero
cudaErrorCheck( cudaMemset((*h_mem)->dy, 0, NSP * padded * sizeof(double)) );
cudaErrorCheck( cudaMemset((*h_mem)->jac, 0, NSP * NSP * padded * sizeof(double)) );
// and copy to device pointer
cudaErrorCheck( cudaMemcpy(*d_mem, *h_mem, sizeof(mechanism_memory), cudaMemcpyHostToDevice) );
}
/**
* \brief Frees the host and device mechanism_memory structs
* \param[in,out] h_mem The host version of the mechanism_memory struct.
* \param[in,out] d_mem The device version of the mechanism_memory struct.
*/
void free_gpu_memory(mechanism_memory** h_mem, mechanism_memory** d_mem)
{
cudaErrorCheck(cudaFree((*h_mem)->y));
cudaErrorCheck(cudaFree((*h_mem)->dy));
cudaErrorCheck(cudaFree((*h_mem)->var));
cudaErrorCheck(cudaFree((*h_mem)->jac));
cudaErrorCheck(cudaFree(*d_mem));
}
#ifdef GENERATE_DOCS
}
#endif
|
07b5fee3e0dc06dcfaf2f5225fdcce2fa9076ffb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/quantized/fake_quant_affine.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <thrust/tuple.h>
#include <cmath>
/* Fake quantize a tensor
Args:
output: output tensor.
input : input tensor.
sc: scale to quantize the input tensor to
zero_point: zero_point
quant_min: minimum quantized value
quant_max: maximum quantized value
Returns:
Fake quantized tensor (float dtype).
*/
namespace at {
namespace native {
void fake_quantize_tensor_cachemask_kernel_cuda(
Tensor& output,
Tensor& mask,
const Tensor& input,
float scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max) {
float inv_scale = 1.0f / scale;
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(output)
.add_output(mask)
.add_input(input)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "fake_quantize_tensor_cachemask_kernel_types", [&] {
gpu_kernel_multiple_outputs(
iter,
[=] GPU_LAMBDA (scalar_t input_val) -> thrust::tuple<scalar_t, bool> {
const auto qval = static_cast<int64_t>(std::nearbyint(input_val * inv_scale) + zero_point);
return {
// fake_quantized value
(fminf(quant_max, fmaxf(quant_min, qval)) - zero_point) * scale,
// mask for grad
((quant_min <= qval) && (qval <= quant_max))
};
}
);
});
}
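// Worked example (illustrative values only): with scale = 0.1f, zero_point = 0,
// quant_min = -128 and quant_max = 127, an input of 0.26f gives
// qval = nearbyint(0.26 / 0.1) + 0 = 3, which lies inside [quant_min, quant_max],
// so the fake-quantized output is (3 - 0) * 0.1 = 0.3f and the mask entry is true.
// An input of 100.0f gives qval = 1000; the output is clamped to
// (127 - 0) * 0.1 = 12.7f and the mask entry is false, so its gradient is
// dropped in the backward pass.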
void _fake_quantize_grad_learnable_tensor_kernel_cuda(
TensorIterator& iter,
float scale,
float inv_scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max,
float grad_factor) {
float dscale_small = quant_min - zero_point;
float dscale_big = quant_max - zero_point;
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (float XInput, float dYInput) -> thrust::tuple<float, float, float> {
float dXOutput, dZeroPointOutput, dScaleOutput;
int64_t xq = std::nearbyint(XInput * inv_scale) + zero_point;
dXOutput = dYInput * (xq >= quant_min && xq <= quant_max);
float xfq = static_cast<float>((::max(::min(xq, quant_max), quant_min) - zero_point) * scale);
if (xq < quant_min || xq > quant_max) {
dZeroPointOutput = (dYInput) * (-1) * scale * grad_factor;
dScaleOutput = ((xq < quant_min) ? (dYInput * dscale_small) : (dYInput * dscale_big)) * grad_factor;
} else {
dZeroPointOutput = 0;
dScaleOutput = (dYInput) * (xfq - (XInput)) * inv_scale * grad_factor;
}
return {dXOutput, dScaleOutput, dZeroPointOutput};
});
}
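// In words: dX passes the incoming gradient through only where the quantized
// value xq stays inside [quant_min, quant_max]. Outside that range the scale
// gradient is dY * (quant_min - zero_point) or dY * (quant_max - zero_point)
// and the zero-point gradient is -dY * scale; inside the range the scale
// gradient is dY * (xfq - x) / scale and the zero-point gradient is zero.
// grad_factor rescales both learnable-parameter gradients.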
REGISTER_DISPATCH(fake_quant_tensor_cachemask_stub, &fake_quantize_tensor_cachemask_kernel_cuda);
REGISTER_DISPATCH(fake_quant_grad_learnable_tensor_stub, &_fake_quantize_grad_learnable_tensor_kernel_cuda);
// Fake quantize per channel
void fake_quant_per_channel_cachemask_cuda(
TensorIterator &iter, TensorIterator &iter_mask, int64_t quant_min, int64_t quant_max) {
// TODO(future, optional): read once, write twice. Not done at the moment
// for simplicity, as we do not expect this to be a bottleneck.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "fake_quantize_channel_cachemask_cuda_mask_type_handling", [&] {
// write mask
gpu_kernel(iter_mask,
[=] GPU_LAMBDA (scalar_t input_val, float scale, int64_t zero_point) -> bool {
float inv_scale = 1.0f / scale;
const auto qval = static_cast<int64_t>(std::nearbyint(input_val * inv_scale) + zero_point);
return ((quant_min <= qval) && (qval <= quant_max));
});
// write fake_quant
gpu_kernel(iter,
[=] GPU_LAMBDA (scalar_t input_val, float scale, int64_t zero_point) -> scalar_t {
float inv_scale = 1.0f / scale;
return (fminf(
quant_max,
fmaxf(
quant_min,
static_cast<int64_t>(
std::nearbyint(input_val * inv_scale) +
zero_point))) -
zero_point) *
scale;
});
});
}
void _fake_quantize_grad_learnable_channel_kernel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max, float grad_factor) {
gpu_kernel_multiple_outputs(iter,
[=] GPU_LAMBDA (float x_input, float dy_input, float scale_input, float zero_point_input) -> thrust::tuple<float, float, float> {
float dx_output, dscale_output, dzero_point_output;
float inv_scale = 1.0f / scale_input;
float dscale_small = quant_min - zero_point_input;
float dscale_big = quant_max - zero_point_input;
// Calculate gradients for X.
int64_t xqi = std::nearbyint(x_input * inv_scale) + static_cast<int64_t>(zero_point_input);
dx_output = dy_input * (xqi >= quant_min && xqi <= quant_max);
// Calculate gradients for scale and zero point.
float xfqi = static_cast<float>((::max(::min(xqi, quant_max), quant_min) - zero_point_input) * scale_input);
if (xqi < quant_min || xqi > quant_max) {
dzero_point_output = dy_input * (-1) * scale_input * grad_factor;
dscale_output = ((xqi < quant_min) ? (dy_input * dscale_small) : (dy_input * dscale_big)) * grad_factor;
} else {
dzero_point_output = 0;
dscale_output = dy_input * (xfqi - x_input) * inv_scale * grad_factor;
}
return {dx_output, dscale_output, dzero_point_output};
});
}
REGISTER_DISPATCH(fake_quant_per_channel_cachemask_stub, &fake_quant_per_channel_cachemask_cuda);
REGISTER_DISPATCH(fake_quant_grad_learnable_channel_stub, &_fake_quantize_grad_learnable_channel_kernel_cuda);
} // namespace native
} // namespace at
|
07b5fee3e0dc06dcfaf2f5225fdcce2fa9076ffb.cu
|
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/quantized/fake_quant_affine.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <thrust/tuple.h>
#include <cmath>
/* Fake quantize a tensor
Args:
output: output tensor.
input : input tensor.
sc: scale to quantize the input tensor to
zero_point: zero_point
quant_min: minimum quantized value
quant_max: maximum quantized value
Returns:
Fake quantized tensor (float dtype).
*/
namespace at {
namespace native {
void fake_quantize_tensor_cachemask_kernel_cuda(
Tensor& output,
Tensor& mask,
const Tensor& input,
float scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max) {
float inv_scale = 1.0f / scale;
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(output)
.add_output(mask)
.add_input(input)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "fake_quantize_tensor_cachemask_kernel_types", [&] {
gpu_kernel_multiple_outputs(
iter,
[=] GPU_LAMBDA (scalar_t input_val) -> thrust::tuple<scalar_t, bool> {
const auto qval = static_cast<int64_t>(std::nearbyint(input_val * inv_scale) + zero_point);
return {
// fake_quantized value
(fminf(quant_max, fmaxf(quant_min, qval)) - zero_point) * scale,
// mask for grad
((quant_min <= qval) && (qval <= quant_max))
};
}
);
});
}
void _fake_quantize_grad_learnable_tensor_kernel_cuda(
TensorIterator& iter,
float scale,
float inv_scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max,
float grad_factor) {
float dscale_small = quant_min - zero_point;
float dscale_big = quant_max - zero_point;
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (float XInput, float dYInput) -> thrust::tuple<float, float, float> {
float dXOutput, dZeroPointOutput, dScaleOutput;
int64_t xq = std::nearbyint(XInput * inv_scale) + zero_point;
dXOutput = dYInput * (xq >= quant_min && xq <= quant_max);
float xfq = static_cast<float>((std::max(std::min(xq, quant_max), quant_min) - zero_point) * scale);
if (xq < quant_min || xq > quant_max) {
dZeroPointOutput = (dYInput) * (-1) * scale * grad_factor;
dScaleOutput = ((xq < quant_min) ? (dYInput * dscale_small) : (dYInput * dscale_big)) * grad_factor;
} else {
dZeroPointOutput = 0;
dScaleOutput = (dYInput) * (xfq - (XInput)) * inv_scale * grad_factor;
}
return {dXOutput, dScaleOutput, dZeroPointOutput};
});
}
REGISTER_DISPATCH(fake_quant_tensor_cachemask_stub, &fake_quantize_tensor_cachemask_kernel_cuda);
REGISTER_DISPATCH(fake_quant_grad_learnable_tensor_stub, &_fake_quantize_grad_learnable_tensor_kernel_cuda);
// Fake quantize per channel
void fake_quant_per_channel_cachemask_cuda(
TensorIterator &iter, TensorIterator &iter_mask, int64_t quant_min, int64_t quant_max) {
// TODO(future, optional): read once, write twice. Not done at the moment
// for simplicity, as we do not expect this to be a bottleneck.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "fake_quantize_channel_cachemask_cuda_mask_type_handling", [&] {
// write mask
gpu_kernel(iter_mask,
[=] GPU_LAMBDA (scalar_t input_val, float scale, int64_t zero_point) -> bool {
float inv_scale = 1.0f / scale;
const auto qval = static_cast<int64_t>(std::nearbyint(input_val * inv_scale) + zero_point);
return ((quant_min <= qval) && (qval <= quant_max));
});
// write fake_quant
gpu_kernel(iter,
[=] GPU_LAMBDA (scalar_t input_val, float scale, int64_t zero_point) -> scalar_t {
float inv_scale = 1.0f / scale;
return (fminf(
quant_max,
fmaxf(
quant_min,
static_cast<int64_t>(
std::nearbyint(input_val * inv_scale) +
zero_point))) -
zero_point) *
scale;
});
});
}
void _fake_quantize_grad_learnable_channel_kernel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max, float grad_factor) {
gpu_kernel_multiple_outputs(iter,
[=] GPU_LAMBDA (float x_input, float dy_input, float scale_input, float zero_point_input) -> thrust::tuple<float, float, float> {
float dx_output, dscale_output, dzero_point_output;
float inv_scale = 1.0f / scale_input;
float dscale_small = quant_min - zero_point_input;
float dscale_big = quant_max - zero_point_input;
// Calculate gradients for X.
int64_t xqi = std::nearbyint(x_input * inv_scale) + static_cast<int64_t>(zero_point_input);
dx_output = dy_input * (xqi >= quant_min && xqi <= quant_max);
// Calculate gradients for scale and zero point.
float xfqi = static_cast<float>((std::max(std::min(xqi, quant_max), quant_min) - zero_point_input) * scale_input);
if (xqi < quant_min || xqi > quant_max) {
dzero_point_output = dy_input * (-1) * scale_input * grad_factor;
dscale_output = ((xqi < quant_min) ? (dy_input * dscale_small) : (dy_input * dscale_big)) * grad_factor;
} else {
dzero_point_output = 0;
dscale_output = dy_input * (xfqi - x_input) * inv_scale * grad_factor;
}
return {dx_output, dscale_output, dzero_point_output};
});
}
REGISTER_DISPATCH(fake_quant_per_channel_cachemask_stub, &fake_quant_per_channel_cachemask_cuda);
REGISTER_DISPATCH(fake_quant_grad_learnable_channel_stub, &_fake_quantize_grad_learnable_channel_kernel_cuda);
} // namespace native
} // namespace at
|
4a853acb35fa56e4bdd8634c89b92d45c12493ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** Diagonal-major layout Soft-DTW kernels
* @file soft_dtw_diagonal_major.cu
* @author Alex Kyllo
* @date 2021-03-14
*/
#include "helper_functions.cuh"
#include "soft_dtw_diagonal_major.cuh"
#include <stdio.h>
void print_diag(const char *X, const uint m, const uint n)
{
for (uint k = 0; k < m + n - 1; k++)
{
for (uint j = 0; j <= k; j++)
{
uint i = k - j;
if (i < m && j < n)
{
std::cout << X[i * n + j] << " ";
}
}
std::cout << "\n";
}
}
/** Kernel function for converting a matrix from row major to antidiagonal-major
* layout.
* @param D The input matrix of dimension m x n
* @param DD The output matrix of dimension (m+n-1) x min(m,n)
* @param m The height of the input matrix (rows)
* @param n The width of the input matrix (columns)
*/
__global__ void convert_diagonal(float *D, float *DD, uint m, uint n)
{
const uint tx = blockIdx.x * blockDim.x + threadIdx.x;
uint j = tx % n;
uint i = (tx - j) / n;
// new i is the antidiagonal ordinal, sum of i and j
uint dest_i = i + j;
// new j = j if in upper left half, else j-dist from leading antidiagonal
uint dest_j = j - max(0, (int)dest_i - (int)m + 1);
DD[dest_i * m + dest_j] = D[i * n + j];
}
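// Worked example (illustrative, m = 3 rows, n = 2 columns): a row-major element
// (i, j) is mapped to antidiagonal row i + j and, within that row, to column
// j - max(0, i + j - m + 1), stored with a row stride of m. So
//   (0,0) -> DD[0],  (1,0) -> DD[3],  (0,1) -> DD[4],
//   (2,0) -> DD[6],  (1,1) -> DD[7],  (2,1) -> DD[9],
// i.e. antidiagonal 0 = {(0,0)}, 1 = {(1,0),(0,1)}, 2 = {(2,0),(1,1)},
// 3 = {(2,1)}. Note the kernel has no bounds check, so it assumes exactly
// m * n threads are launched.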
/** Kernel function for computing "naive" Soft DTW on pairwise Euclidean
* distance matrix for multivariate time series with CUDA. Input D should be a
* __device__ array.
* This version assumes D is a diagonal-major array where m and n are the
* dimensions of the original row-major array. m x n becomes (m+n-1) x min(m,n).
* It also assumes R is a diagonal-major array where (m+2) and (n+2) are the
* dimensions of the original row-major array.
* (m+2) x (n+2) becomes (m+n+3) x min(m+2,n+2)
* This naive version only works for sequence lengths <= 1024 i.e. can fit in
* a single threadblock.
* Assumes only a single threadblock in the kernel launch.
* Each thread can process one anti-diagonal.
* @param D The pairwise squared Euclidean distance array of two time series
* @param R An m+2 x n+2 array that will be filled with the alignments
* @param cost The total path cost will be written to this address
* @param m Length of first time series
* @param n Length of second time series
* @param gamma SoftDTW smoothing parameter
*/
__global__ void softdtw_diagonal_kernel(float *D, float *R, float *cost, uint m,
uint n, float gamma)
{
const uint tx = threadIdx.x;
const uint bd = blockDim.x;
// block size = min(m, n) (length of longest diagonal)
// number of antidiagonals is m+n-1
// D is now (m+n-1) x min(m,n)
// R is now (m+n+3) x min(m+1,n+1)
const uint passes = m + n - 1;
for (uint p = 0; p < passes; p++)
{
uint ii = max(0, (int)p - (int)tx);
uint past_mid = max(0, (int)p - (int)bd + 1);
uint i = ii + 1 - past_mid;
uint j = tx + 1 + past_mid;
if (tx + ii <= p && j <= n)
{
// convert i,j to diagonal-major coordinates
// new j = j if in upper left half, else j-dist from leading
// antidiagonal
uint di = (i - 1) + (j - 1);
uint dj = j - 1 - past_mid;
uint ri = i + j;
uint rj = j - past_mid;
uint r1j = rj - 1;
uint r2j = rj - 1;
uint r3j = rj;
// If we are past the antidiagonal, need to increment the previous
// cell references
if (p >= bd)
{
r1j++;
r2j++;
r3j++;
}
if (p > bd)
{
r1j++;
}
float cost = D[di * bd + dj];
float r1 = R[di * (bd + 2) + r1j];
float r2 = R[(ri - 1) * (bd + 2) + r2j];
float r3 = R[(ri - 1) * (bd + 2) + r3j];
double prev_min = softmin(r1, r2, r3, gamma);
R[ri * (bd + 2) + rj] = cost + prev_min;
}
__syncthreads();
}
if (tx == 0)
{
*cost = R[(m + n) * (bd + 2) + 1];
}
}
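// The per-cell update above is the usual soft-DTW recurrence
//   R[i][j] = D[i-1][j-1] + softmin_gamma(R[i-1][j-1], R[i-1][j], R[i][j-1]),
// where softmin_gamma(a, b, c) is conventionally
//   -gamma * log(exp(-a / gamma) + exp(-b / gamma) + exp(-c / gamma));
// the actual softmin implementation used here comes from helper_functions.cuh.
// The index arithmetic only translates those three neighbours into the
// diagonal-major layouts of D and R described in the comment above.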
/** Kernel function for computing "naive" Soft DTW on pairwise Euclidean
* distance matrix for multiple distance matrices of multivariate time series
* with CUDA. Input D should be a __device__ array.
* This version assumes D is an array of diagonal-major arrays where m and n are
* the dimensions of the original row-major array. m x n becomes (m+n-1) x
* min(m,n). It also assumes R is a diagonal-major array where (m+2) and (n+2)
* are the dimensions of the original row-major array. (m+2) x (n+2) becomes
* (m+n+3) x min(m+2,n+2) This naive version only works for sequence lengths <=
* 1024 i.e. can fit in a single threadblock. Assumes only a single threadblock
* in the kernel launch. Each thread can process one anti-diagonal.
* @param D The pairwise squared Euclidean distance array of two time series
* @param R An m+2 x n+2 array that will be filled with the alignments
* @param cost The total path cost will be written to this address
* @param nD The number of distance matrices in D
* @param m Length of first time series
* @param n Length of second time series
* @param gamma SoftDTW smoothing parameter
*/
__global__ void softdtw_diagonal_kernel_multi(float *D, float *R, float *cost,
uint nD, uint m, uint n,
float gamma)
{
const uint tx = threadIdx.x;
const uint bd = blockDim.x;
const uint bx = blockIdx.x;
const uint bD = bx * (m + n - 1) * min(m, n);
const uint bD2 = bx * (m + n + 3) * min(m + 2, n + 2);
// block size = min(m, n) (length of longest diagonal)
// number of antidiagonals is m+n-1
// D is now (m+n-1) x min(m,n)
// R is now (m+n+3) x min(m+1,n+1)
const uint passes = m + n - 1;
for (uint p = 0; p < passes; p++)
{
uint ii = max(0, (int)p - (int)tx);
uint past_mid = max(0, (int)p - (int)bd + 1);
uint i = ii + 1 - past_mid;
uint j = tx + 1 + past_mid;
if (tx + ii <= p && j <= n)
{
// convert i,j to diagonal-major coordinates
// new j = j if in upper left half, else j-dist from leading
// antidiagonal
uint di = (i - 1) + (j - 1);
uint dj = j - 1 - past_mid;
uint ri = i + j;
uint rj = j - past_mid;
uint r1j = rj - 1;
uint r2j = rj - 1;
uint r3j = rj;
// If we are past the antidiagonal, need to increment the previous
// cell references
if (p >= bd)
{
r1j++;
r2j++;
r3j++;
}
if (p > bd)
{
r1j++;
}
float cost = D[bD + di * bd + dj];
float r1 = R[bD2 + di * (bd + 2) + r1j];
float r2 = R[bD2 + (ri - 1) * (bd + 2) + r2j];
float r3 = R[bD2 + (ri - 1) * (bd + 2) + r3j];
double prev_min = softmin(r1, r2, r3, gamma);
R[bD2 + ri * (bd + 2) + rj] = cost + prev_min;
}
__syncthreads();
}
if (tx == 0)
{
cost[bx] = R[bD2 + (m + n) * (bd + 2) + 1];
}
}
|
4a853acb35fa56e4bdd8634c89b92d45c12493ab.cu
|
/** Diagonal-major layout Soft-DTW kernels
* @file soft_dtw_diagonal_major.cu
* @author Alex Kyllo
* @date 2021-03-14
*/
#include "helper_functions.cuh"
#include "soft_dtw_diagonal_major.cuh"
#include <stdio.h>
void print_diag(const char *X, const uint m, const uint n)
{
for (uint k = 0; k < m + n - 1; k++)
{
for (uint j = 0; j <= k; j++)
{
uint i = k - j;
if (i < m && j < n)
{
std::cout << X[i * n + j] << " ";
}
}
std::cout << "\n";
}
}
/** Kernel function for converting a matrix from row major to antidiagonal-major
* layout.
* @param D The input matrix of dimension m x n
* @param DD The output matrix of dimension (m+n-1) x min(m,n)
* @param m The height of the input matrix (rows)
* @param n The width of the input matrix (columns)
*/
__global__ void convert_diagonal(float *D, float *DD, uint m, uint n)
{
const uint tx = blockIdx.x * blockDim.x + threadIdx.x;
uint j = tx % n;
uint i = (tx - j) / n;
// new i is the antidiagonal ordinal, sum of i and j
uint dest_i = i + j;
// new j = j if in upper left half, else j-dist from leading antidiagonal
uint dest_j = j - max(0, (int)dest_i - (int)m + 1);
DD[dest_i * m + dest_j] = D[i * n + j];
}
/** Kernel function for computing "naive" Soft DTW on pairwise Euclidean
* distance matrix for multivariate time series with CUDA. Input D should be a
* __device__ array.
* This version assumes D is a diagonal-major array where m and n are the
* dimensions of the original row-major array. m x n becomes (m+n-1) x min(m,n).
* It also assumes R is a diagonal-major array where (m+2) and (n+2) are the
* dimensions of the original row-major array.
* (m+2) x (n+2) becomes (m+n+3) x min(m+2,n+2)
* This naive version only works for sequence lengths <= 1024 i.e. can fit in
* a single threadblock.
* Assumes only a single threadblock in the kernel launch.
* Each thread can process one anti-diagonal.
* @param D The pairwise squared Euclidean distance array of two time series
* @param R An m+2 x n+2 array that will be filled with the alignments
* @param cost The total path cost will be written to this address
* @param m Length of first time series
* @param n Length of second time series
* @param gamma SoftDTW smoothing parameter
*/
__global__ void softdtw_diagonal_kernel(float *D, float *R, float *cost, uint m,
uint n, float gamma)
{
const uint tx = threadIdx.x;
const uint bd = blockDim.x;
// block size = min(m, n) (length of longest diagonal)
// number of antidiagonals is m+n-1
// D is now (m+n-1) x min(m,n)
// R is now (m+n+3) x min(m+1,n+1)
const uint passes = m + n - 1;
for (uint p = 0; p < passes; p++)
{
uint ii = max(0, (int)p - (int)tx);
uint past_mid = max(0, (int)p - (int)bd + 1);
uint i = ii + 1 - past_mid;
uint j = tx + 1 + past_mid;
if (tx + ii <= p && j <= n)
{
// convert i,j to diagonal-major coordinates
// new j = j if in upper left half, else j-dist from leading
// antidiagonal
uint di = (i - 1) + (j - 1);
uint dj = j - 1 - past_mid;
uint ri = i + j;
uint rj = j - past_mid;
uint r1j = rj - 1;
uint r2j = rj - 1;
uint r3j = rj;
// If we are past the antidiagonal, need to increment the previous
// cell references
if (p >= bd)
{
r1j++;
r2j++;
r3j++;
}
if (p > bd)
{
r1j++;
}
float cost = D[di * bd + dj];
float r1 = R[di * (bd + 2) + r1j];
float r2 = R[(ri - 1) * (bd + 2) + r2j];
float r3 = R[(ri - 1) * (bd + 2) + r3j];
double prev_min = softmin(r1, r2, r3, gamma);
R[ri * (bd + 2) + rj] = cost + prev_min;
}
__syncthreads();
}
if (tx == 0)
{
*cost = R[(m + n) * (bd + 2) + 1];
}
}
/** Kernel function for computing "naive" Soft DTW on pairwise Euclidean
* distance matrix for multiple distance matrices of multivariate time series
* with CUDA. Input D should be a __device__ array.
* This version assumes D is an array of diagonal-major arrays where m and n are
* the dimensions of the original row-major array. m x n becomes (m+n-1) x
* min(m,n). It also assumes R is a diagonal-major array where (m+2) and (n+2)
* are the dimensions of the original row-major array. (m+2) x (n+2) becomes
* (m+n+3) x min(m+2,n+2) This naive version only works for sequence lengths <=
* 1024 i.e. can fit in a single threadblock. Assumes only a single threadblock
* in the kernel launch. Each thread can process one anti-diagonal.
* @param D The pairwise squared Euclidean distance array of two time series
* @param R An m+2 x n+2 array that will be filled with the alignments
* @param cost The total path cost will be written to this address
* @param nD The number of distance matrices in D
* @param m Length of first time series
* @param n Length of second time series
* @param gamma SoftDTW smoothing parameter
*/
__global__ void softdtw_diagonal_kernel_multi(float *D, float *R, float *cost,
uint nD, uint m, uint n,
float gamma)
{
const uint tx = threadIdx.x;
const uint bd = blockDim.x;
const uint bx = blockIdx.x;
const uint bD = bx * (m + n - 1) * min(m, n);
const uint bD2 = bx * (m + n + 3) * min(m + 2, n + 2);
// block size = min(m, n) (length of longest diagonal)
// number of antidiagonals is m+n-1
// D is now (m+n-1) x min(m,n)
// R is now (m+n+3) x min(m+1,n+1)
const uint passes = m + n - 1;
for (uint p = 0; p < passes; p++)
{
uint ii = max(0, (int)p - (int)tx);
uint past_mid = max(0, (int)p - (int)bd + 1);
uint i = ii + 1 - past_mid;
uint j = tx + 1 + past_mid;
if (tx + ii <= p && j <= n)
{
// convert i,j to diagonal-major coordinates
// new j = j if in upper left half, else j-dist from leading
// antidiagonal
uint di = (i - 1) + (j - 1);
uint dj = j - 1 - past_mid;
uint ri = i + j;
uint rj = j - past_mid;
uint r1j = rj - 1;
uint r2j = rj - 1;
uint r3j = rj;
// If we are past the antidiagonal, need to increment the previous
// cell references
if (p >= bd)
{
r1j++;
r2j++;
r3j++;
}
if (p > bd)
{
r1j++;
}
float cost = D[bD + di * bd + dj];
float r1 = R[bD2 + di * (bd + 2) + r1j];
float r2 = R[bD2 + (ri - 1) * (bd + 2) + r2j];
float r3 = R[bD2 + (ri - 1) * (bd + 2) + r3j];
double prev_min = softmin(r1, r2, r3, gamma);
R[bD2 + ri * (bd + 2) + rj] = cost + prev_min;
}
__syncthreads();
}
if (tx == 0)
{
cost[bx] = R[bD2 + (m + n) * (bd + 2) + 1];
}
}
|
5794624aa3ed62a634e206fbf25058d3c27a377a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty_like.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#endif
namespace at { namespace native {
template <typename scalar_t, int unroll_factor, typename F>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void rrelu_with_noise_cuda_kernel(
int numel,
PhiloxCudaState philox_args,
scalar_t* output,
scalar_t* input,
scalar_t* noise,
double lower,
double upper,
const F& random_func) {
auto seeds = at::cuda::philox::unpack(philox_args);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
int grid_stride = blockDim.x * gridDim.x * unroll_factor;
int rounded_size = ((numel - 1) / grid_stride + 1) * grid_stride;
double range = upper - lower;
for (int linear_index = idx; linear_index < rounded_size; linear_index += grid_stride) {
auto rand = random_func(&state);
// ensure that (&rand.x)[ii] is safe
static_assert(sizeof(rand)/sizeof(rand.x) == unroll_factor, "");
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li >= numel) {
continue;
}
scalar_t r = static_cast<scalar_t>((&rand.x)[ii]);
r = r * range + lower;
if (input[li] <= 0) {
output[li] = input[li] * r;
noise[li] = r;
} else {
output[li] = input[li];
noise[li] = static_cast<scalar_t>(1);
}
}
__syncthreads();
}
}
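// Behaviour implemented above (training-mode RReLU): for each element with
// input <= 0 a slope r is drawn uniformly between lower and upper, the output
// is input * r, and r is cached in noise for the backward pass; positive
// inputs pass through unchanged with their noise entry set to 1. The
// grid-stride loop together with unroll_factor lets a single Philox draw
// (two doubles or four floats) cover several elements per thread.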
template <typename scalar_t>
inline void _rrelu_with_noise_cuda_train(
Tensor& output,
const Tensor& input_,
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
int64_t numel = input.numel();
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
auto gen = get_generator_or_default<CUDAGeneratorImpl>(
generator, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* noise_data = noise.data_ptr<scalar_t>();
scalar_t* output_data = tmp_output.data_ptr<scalar_t>();
double lower = lower_.to<double>();
double upper = upper_.to<double>();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (std::is_same<scalar_t, double>::value) {
hipLaunchKernelGGL(( rrelu_with_noise_cuda_kernel<scalar_t, 2>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand_uniform2_double(state);
});
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
// half and float
hipLaunchKernelGGL(( rrelu_with_noise_cuda_kernel<scalar_t, 4>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower, upper,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand_uniform4(state);
});
C10_HIP_KERNEL_LAUNCH_CHECK();
}
if (!output.is_contiguous()) {
output.copy_(tmp_output);
}
}
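// The unroll factor is tied to the width of the random draw:
// hiprand_uniform2_double returns a double2 (hence unroll_factor 2 for double
// tensors) while hiprand_uniform4 returns a float4 (unroll_factor 4 for
// float/half); the static_assert inside the kernel checks that the draw width
// and unroll_factor stay consistent.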
Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator,
Tensor& output) {
at::native::resize_output(output, self.sizes());
if (self.numel() == 0) {
return output;
}
TensorArg self_arg{self, "self", 1}, noise_arg{noise, "noise", 2},
output_arg{output, "output", 3};
checkAllSameGPU("rrelu_with_noise_out_cuda", {self_arg, noise_arg, output_arg});
if (training) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "rrelu_with_noise_out_cuda", [&] {
_rrelu_with_noise_cuda_train<scalar_t>(
output, self, noise, lower, upper, generator);
});
}
else {
auto lower_tensor = lower.to<double>();
auto upper_tensor = upper.to<double>();
Scalar negative_slope = (lower_tensor + upper_tensor) / 2;
at::leaky_relu_out(output, self, negative_slope);
}
return output;
}
Tensor rrelu_with_noise_cuda(
const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
Tensor& rrelu_with_noise_cuda_(
Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}
}} // namespace at::native
|
5794624aa3ed62a634e206fbf25058d3c27a377a.cu
|
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty_like.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#endif
namespace at { namespace native {
template <typename scalar_t, int unroll_factor, typename F>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void rrelu_with_noise_cuda_kernel(
int numel,
PhiloxCudaState philox_args,
scalar_t* output,
scalar_t* input,
scalar_t* noise,
double lower,
double upper,
const F& random_func) {
auto seeds = at::cuda::philox::unpack(philox_args);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
int grid_stride = blockDim.x * gridDim.x * unroll_factor;
int rounded_size = ((numel - 1) / grid_stride + 1) * grid_stride;
double range = upper - lower;
for (int linear_index = idx; linear_index < rounded_size; linear_index += grid_stride) {
auto rand = random_func(&state);
// ensure that (&rand.x)[ii] is safe
static_assert(sizeof(rand)/sizeof(rand.x) == unroll_factor, "");
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li >= numel) {
continue;
}
scalar_t r = static_cast<scalar_t>((&rand.x)[ii]);
r = r * range + lower;
if (input[li] <= 0) {
output[li] = input[li] * r;
noise[li] = r;
} else {
output[li] = input[li];
noise[li] = static_cast<scalar_t>(1);
}
}
__syncthreads();
}
}
template <typename scalar_t>
inline void _rrelu_with_noise_cuda_train(
Tensor& output,
const Tensor& input_,
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
int64_t numel = input.numel();
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
auto gen = get_generator_or_default<CUDAGeneratorImpl>(
generator, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* noise_data = noise.data_ptr<scalar_t>();
scalar_t* output_data = tmp_output.data_ptr<scalar_t>();
double lower = lower_.to<double>();
double upper = upper_.to<double>();
auto stream = at::cuda::getCurrentCUDAStream();
if (std::is_same<scalar_t, double>::value) {
rrelu_with_noise_cuda_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform2_double(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
// half and float
rrelu_with_noise_cuda_kernel<scalar_t, 4><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower, upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform4(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
if (!output.is_contiguous()) {
output.copy_(tmp_output);
}
}
Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator,
Tensor& output) {
at::native::resize_output(output, self.sizes());
if (self.numel() == 0) {
return output;
}
TensorArg self_arg{self, "self", 1}, noise_arg{noise, "noise", 2},
output_arg{output, "output", 3};
checkAllSameGPU("rrelu_with_noise_out_cuda", {self_arg, noise_arg, output_arg});
if (training) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "rrelu_with_noise_out_cuda", [&] {
_rrelu_with_noise_cuda_train<scalar_t>(
output, self, noise, lower, upper, generator);
});
}
else {
auto lower_tensor = lower.to<double>();
auto upper_tensor = upper.to<double>();
Scalar negative_slope = (lower_tensor + upper_tensor) / 2;
at::leaky_relu_out(output, self, negative_slope);
}
return output;
}
Tensor rrelu_with_noise_cuda(
const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
Tensor& rrelu_with_noise_cuda_(
Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}
}} // namespace at::native
|
71bfd002c049a156a4f4553a33233f4d7313d138.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include syevd_example.cpp
* g++ -o a.out syevd_example.o -L/usr/local/cuda/lib64 -lcudart -lcusolver
*
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
/* Time-keeping modules */
#include <chrono>
#include <ctime>
/* Type definitions */
#include "typeDefs.h"
/* CUDA functions */
#include "eigenFinder.h"
#include "dsyevd_stream_functions.cuh"
#include "dsyevj_stream_functions.cuh"
//~ #include "geqrf_stream_functions.cuh"
#include "dsyevj_batch_functions.cuh"
#include "kernel_functions.cuh"
/* MAGMA functions */
//~ #include "magma_dsyev_batch_functions.cuh"
/* Self-improvised QR algorithm */
#include "QR_batch_function.cuh"
/* Self-written Jacobi algorithm */
#include "jacobi_kernel_function_new.cuh"
/* Additional functions */
#include "fill_matrices.cuh"
void printMatrix(int m, int n, const floatType*A, const char* name){
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
floatType Areg = A[row + col*m];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
int main(int argc, char*argv[]){
if (argc != 3){
std::cout << "you need to input matrix size and number, respectively" << std::endl;
return 1;
}
const int N = atoi(argv[1]);
const int m = N;
const int batchSize = atoi(argv[2]);
/* Declare host arrays */
floatType *A = new floatType [m*m*batchSize];
floatType *V = new floatType [m*m*batchSize];
floatType *W = new floatType [m*batchSize];
/* Declare device arrays */
floatType *d_A = NULL; /* m-by-m-by-batchSize */
floatType *d_W = NULL; /* m-by-batchSize */
/* Fill up array A with matrix elements */
fillSymmetricMatrices_full(A, m, batchSize);
/* Allocate A on device */
hipMalloc ((void**)&d_A , sizeof(floatType) * m * m * batchSize);
hipMalloc ((void**)&d_W , sizeof(floatType) * m * batchSize);
/* Copy A to device */
hipMemcpy(d_A, A, sizeof(floatType) * m * m * batchSize, hipMemcpyHostToDevice);
hipDeviceSynchronize();
/* Call diagonalisation routine of choice */
//~ double time_gpu = diagonalise_kernel(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_stream_syevd(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_stream_syevj(d_A, d_W, m, batchSize);
//~ double time_gpu = jacobi_kernels(d_A, d_W, m, batchSize);
double time_gpu = jacobi_kernels_parallel(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_batch_QR(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_batch_syevj(d_A, d_W, m, batchSize);
std::cout<<"Time gpu: "<< time_gpu << " s" << std::endl;
/* Copy A and W back from device */
hipMemcpy(V, d_A, sizeof(floatType) * m * m * batchSize, hipMemcpyDeviceToHost);
hipMemcpy(W, d_W, sizeof(floatType) * m * batchSize , hipMemcpyDeviceToHost);
/* CPU BENCHMARKING */
/* Symmetric matrix size */
int matSize = m*(m+1)/2;
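// packed symmetric storage: only the m*(m+1)/2 unique upper-triangle elements of each matrix are kept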
/* Declare CPU arrays */
double *A_CPU = new double [matSize*batchSize];
double *V_CPU = new double [m*m*batchSize];
double *W_CPU = new double [m*batchSize];
/* Fill up array A with matrix elements */
fillSymmetricMatrices_symm(A_CPU, m, batchSize);
auto start = std::chrono::system_clock::now();
/* Diagonalise A using MKL LAPACK */
/* Arg 1 is A and must be upper-triangle */
/* Arg 2 are the eigenvalues */
/* Arg 3 are the eigenvectors */
/* Arg 4 is the dimension */
for (int i=0; i<batchSize; i++){
findEigenReal(&A_CPU[i*matSize], &W_CPU[m*i], &V_CPU[i*m*m], m);
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> time_raw = end-start;
double time_cpu = time_raw.count();
std::cout<<"Time cpu: "<< time_cpu << " s" << std::endl;
//~ if (N <=6){
//~ for (int i=0; i<m; i++){
//~ std::cout << std::endl;
//~ std::cout << "E: " << W[i] << std::endl;
//~ for (int j=0; j<m; j++){
//~ std::cout << V[i*m+j] << std::endl;
//~ }
//~ }
//~ }
/* Print any numerically big differences in eigenvalues between GPU and CPU*/
floatType maxDiff = 0;
int max_h = 0, max_i = 0;
for (int M=0; M<batchSize; M++){
std::vector<floatType> eigenVals_gpu (m);
for (int i=0; i<m; i++){
eigenVals_gpu[i] = W[M*m + i];
}
std::sort(eigenVals_gpu.begin(), eigenVals_gpu.end());
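// sort the GPU eigenvalues so they can be compared index-by-index with the CPU results (assumed to be in ascending order)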
for (int i=0; i<m; i++){
//~ std::cout << M << " " << i << " " << eigenVals_gpu[i] << std::endl;
//~ std::cout << M << " " << i << " " << W_CPU[M*m+i] << "\n" << std::endl;
floatType diff = std::fabs(W_CPU[M*m + i] - eigenVals_gpu[i]);
if (diff > maxDiff){
maxDiff = diff;
max_h = M;
max_i = i;
//~ std::cout << M << " " << i << " " << diff << std::endl;
}
}
}
std::cout << std::endl;
std::cout << "Max diff: " << maxDiff << std::endl;
std::cout << "h: " << max_h << " i: " << max_i << std::endl;
/* free resources */
if (d_A) hipFree(d_A);
if (d_W) hipFree(d_W);
delete [] A;
delete [] V;
delete [] W;
delete [] A_CPU;
delete [] V_CPU;
delete [] W_CPU;
hipDeviceReset();
return 0;
}
|
71bfd002c049a156a4f4553a33233f4d7313d138.cu
|
/*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include syevd_example.cpp
* g++ -o a.out syevd_example.o -L/usr/local/cuda/lib64 -lcudart -lcusolver
*
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <algorithm>
#include <vector>
#include <cmath>
#include <cuda_runtime.h>
#include <cusolverDn.h>
/* Time-keeping modules */
#include <chrono>
#include <ctime>
/* Type definitions */
#include "typeDefs.h"
/* CUDA functions */
#include "eigenFinder.h"
#include "dsyevd_stream_functions.cuh"
#include "dsyevj_stream_functions.cuh"
//~ #include "geqrf_stream_functions.cuh"
#include "dsyevj_batch_functions.cuh"
#include "kernel_functions.cuh"
/* MAGMA functions */
//~ #include "magma_dsyev_batch_functions.cuh"
/* Self-improvised QR algorithm */
#include "QR_batch_function.cuh"
/* Self-written Jacobi algorithm */
#include "jacobi_kernel_function_new.cuh"
/* Additional functions */
#include "fill_matrices.cuh"
void printMatrix(int m, int n, const floatType*A, const char* name){
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
floatType Areg = A[row + col*m];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
int main(int argc, char*argv[]){
if (argc != 3){
std::cout << "you need to input matrix size and number, respectively" << std::endl;
return 1;
}
const int N = atoi(argv[1]);
const int m = N;
const int batchSize = atoi(argv[2]);
/* Declare host arrays */
floatType *A = new floatType [m*m*batchSize];
floatType *V = new floatType [m*m*batchSize];
floatType *W = new floatType [m*batchSize];
/* Declare device arrays */
floatType *d_A = NULL; /* m-by-m-by-batchSize */
floatType *d_W = NULL; /* m-by-batchSize */
/* Fill up array A with matrix elements */
fillSymmetricMatrices_full(A, m, batchSize);
/* Allocate A on device */
cudaMalloc ((void**)&d_A , sizeof(floatType) * m * m * batchSize);
cudaMalloc ((void**)&d_W , sizeof(floatType) * m * batchSize);
/* Copy A to device */
cudaMemcpy(d_A, A, sizeof(floatType) * m * m * batchSize, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
/* Call diagonalisation routine of choice */
//~ double time_gpu = diagonalise_kernel(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_stream_syevd(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_stream_syevj(d_A, d_W, m, batchSize);
//~ double time_gpu = jacobi_kernels(d_A, d_W, m, batchSize);
double time_gpu = jacobi_kernels_parallel(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_batch_QR(d_A, d_W, m, batchSize);
//~ double time_gpu = diagonalise_batch_syevj(d_A, d_W, m, batchSize);
std::cout<<"Time gpu: "<< time_gpu << " s" << std::endl;
/* Copy A and W back from device */
cudaMemcpy(V, d_A, sizeof(floatType) * m * m * batchSize, cudaMemcpyDeviceToHost);
cudaMemcpy(W, d_W, sizeof(floatType) * m * batchSize , cudaMemcpyDeviceToHost);
/* CPU BENCHMARKING */
/* Symmetric matrix size */
int matSize = m*(m+1)/2;
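// packed symmetric storage: only the m*(m+1)/2 unique upper-triangle elements of each matrix are kept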
/* Declare CPU arrays */
double *A_CPU = new double [matSize*batchSize];
double *V_CPU = new double [m*m*batchSize];
double *W_CPU = new double [m*batchSize];
/* Fill up array A with matrix elements */
fillSymmetricMatrices_symm(A_CPU, m, batchSize);
auto start = std::chrono::system_clock::now();
/* Diagonalise A using MKL LAPACK */
/* Arg 1 is A and must be upper-triangle */
/* Arg 2 are the eigenvalues */
/* Arg 3 are the eigenvectors */
/* Arg 4 is the dimension */
for (int i=0; i<batchSize; i++){
findEigenReal(&A_CPU[i*matSize], &W_CPU[m*i], &V_CPU[i*m*m], m);
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> time_raw = end-start;
double time_cpu = time_raw.count();
std::cout<<"Time cpu: "<< time_cpu << " s" << std::endl;
//~ if (N <=6){
//~ for (int i=0; i<m; i++){
//~ std::cout << std::endl;
//~ std::cout << "E: " << W[i] << std::endl;
//~ for (int j=0; j<m; j++){
//~ std::cout << V[i*m+j] << std::endl;
//~ }
//~ }
//~ }
/* Print any numerically big differences in eigenvalues between GPU and CPU*/
floatType maxDiff = 0;
int max_h = 0, max_i = 0;
for (int M=0; M<batchSize; M++){
std::vector<floatType> eigenVals_gpu (m);
for (int i=0; i<m; i++){
eigenVals_gpu[i] = W[M*m + i];
}
std::sort(eigenVals_gpu.begin(), eigenVals_gpu.end());
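// sort the GPU eigenvalues so they can be compared index-by-index with the CPU results (assumed to be in ascending order)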
for (int i=0; i<m; i++){
//~ std::cout << M << " " << i << " " << eigenVals_gpu[i] << std::endl;
//~ std::cout << M << " " << i << " " << W_CPU[M*m+i] << "\n" << std::endl;
floatType diff = std::fabs(W_CPU[M*m + i] - eigenVals_gpu[i]);
if (diff > maxDiff){
maxDiff = diff;
max_h = M;
max_i = i;
//~ std::cout << M << " " << i << " " << diff << std::endl;
}
}
}
std::cout << std::endl;
std::cout << "Max diff: " << maxDiff << std::endl;
std::cout << "h: " << max_h << " i: " << max_i << std::endl;
/* free resources */
if (d_A) cudaFree(d_A);
if (d_W) cudaFree(d_W);
delete [] A;
delete [] V;
delete [] W;
delete [] A_CPU;
delete [] V_CPU;
delete [] W_CPU;
cudaDeviceReset();
return 0;
}
|
91cff1ab5d2035d17057e32647c7b39e96b79182.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/multiply.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
#include "unary_op_hip.cuh"
namespace MLCommon {
namespace LinAlg {
template <typename T>
class MultiplyTest
: public ::testing::TestWithParam<raft::linalg::UnaryOpInputs<T>> {
protected:
void SetUp() override {
params =
::testing::TestWithParam<raft::linalg::UnaryOpInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(in, len);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
raft::linalg::naiveScale(out_ref, in, params.scalar, len, stream);
multiplyScalar(out, in, params.scalar, len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
raft::linalg::UnaryOpInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<raft::linalg::UnaryOpInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestF,
::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<raft::linalg::UnaryOpInputs<double>> inputsd = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
91cff1ab5d2035d17057e32647c7b39e96b79182.cu
|
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/multiply.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
#include "unary_op.cuh"
namespace MLCommon {
namespace LinAlg {
template <typename T>
class MultiplyTest
: public ::testing::TestWithParam<raft::linalg::UnaryOpInputs<T>> {
protected:
void SetUp() override {
params =
::testing::TestWithParam<raft::linalg::UnaryOpInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(in, len);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
raft::linalg::naiveScale(out_ref, in, params.scalar, len, stream);
multiplyScalar(out, in, params.scalar, len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
raft::linalg::UnaryOpInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<raft::linalg::UnaryOpInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestF,
::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<raft::linalg::UnaryOpInputs<double>> inputsd = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
683974f7126d0bcfdb48c784806bed9f2b037637.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void splitCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
const T* x = reinterpret_cast<const T*>(vx);
__shared__ Nd4jLong xLen, totalThreads;
__shared__ int xRank, zDim;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
zDim = shape::shapeOf(zTadShapeInfo)[axis]; // same for all output arrays
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[MAX_RANK];
for (uint64_t i = tid; i < xLen; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto xOffset = shape::getOffset(xShapeInfo, coords);
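// coords[axis] / zDim selects which output sub-array receives this element; the modulo below remaps the coordinate into that sub-array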
auto *z = reinterpret_cast<T*>(reinterpret_cast<void **>(pVz)[coords[axis] / zDim]);
coords[axis] %= zDim;
const auto zOffset = shape::getOffset(zTadShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void splitCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
hipLaunchKernelGGL(( splitCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, pVz, zTadShapeInfo, axis);
}
BUILD_SINGLE_TEMPLATE(template void splitCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
void split(sd::LaunchContext* context, const NDArray& input, std::vector<NDArray*>& outArrs, const int axis) {
const int numOfSubArrs = outArrs.size();
const auto sizeofT = input.sizeOfT();
for(int i = 0; i < numOfSubArrs; ++i)
outArrs[i]->syncToDevice();
input.syncToDevice();
bool luckCase1 = ((axis == 0 && input.ordering() == 'c') || (axis == input.rankOf() - 1 && input.ordering() == 'f')) && input.ews() == 1;
if(luckCase1) {
for (uint i = 0; i < numOfSubArrs; ++i) {
luckCase1 &= outArrs[i]->ordering() == input.ordering() && outArrs[i]->ews() == 1;
if(!luckCase1)
break;
}
}
if(luckCase1) { // for example {1,10} + {2,10} + {3,10} = {6, 10} order c; or {10,1} + {10,2} + {10,3} = {10, 6} order f
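// fast path: the outputs partition the input buffer contiguously, so each output can be filled with a single device-to-device copy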
void* x = static_cast<int8_t*>(input.getSpecialBuffer());
for (uint i = 0; i < numOfSubArrs; ++i) {
const auto memAmountToCopy = outArrs[i]->lengthOf() * sizeofT;
hipMemcpyAsync(static_cast<int8_t*>(outArrs[i]->getSpecialBuffer()), x, memAmountToCopy, hipMemcpyDeviceToDevice, *context->getCudaStream());
x = static_cast<int8_t*>(x) + memAmountToCopy;
}
if(hipStreamSynchronize(*context->getCudaStream()) != 0)
throw std::runtime_error("split cuda: luckCase1 failed!");
for(int i = 0; i < numOfSubArrs; ++i)
outArrs[i]->tickWriteDevice();
input.tickReadDevice();
return;
}
// const bool isXcontin = input.strideAt(axis) == 1;
// bool areOutputsContin = true;
// bool allSameOrder = true;
// std::vector<Nd4jLong> strideOfContigStride(outArrs.size());
// if(isXcontin) {
// for (uint i = 0; i < outArrs.size(); ++i) {
// areOutputsContin &= outArrs[i]->strideAt(axis) == 1;
// allSameOrder &= input.ordering() == outArrs[i]->ordering();
// if(!areOutputsContin || !allSameOrder)
// break;
// strideOfContigStride[i] = shape::strideOverContigAxis(axis, outArrs[i]->getShapeInfo());
// }
// }
// const bool luckCase2 = isXcontin && areOutputsContin && allSameOrder;
// if(luckCase2) { // for example {2,1,3} + {2,5,3} + {2,10,3} = {2,16,3}, here axis 1 should have stride = 1 for all output arrays and the input array
// const auto xStep = shape::strideOverContigAxis(axis, input.getShapeInfo());
// const auto zDim = outArrs[0]->sizeAt(axis); // same for all outArrs
// for (uint i = 0; i < input.lengthOf() / input.sizeAt(axis); ++i) {
// const auto iShift = i * sizeofT;
// void* x = static_cast<int8_t*>(input.getSpecialBuffer()) + xStep * iShift;
// for (uint j = 0; j < numOfSubArrs; ++j) {
// void* z = static_cast<int8_t*>(outArrs[j]->getSpecialBuffer()) + strideOfContigStride[j] * iShift;
// const auto memSizeToCopy = zDim * sizeofT;
// hipMemcpyAsync(z, x, memSizeToCopy, hipMemcpyDeviceToDevice, *context->getCudaStream());
// x = static_cast<int8_t*>(x) + memSizeToCopy;
// }
// }
// if(hipStreamSynchronize(*context->getCudaStream()) != 0)
// throw std::runtime_error("split cuda: luckCase2 failed!");
// }
// else { // general (slower) case
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// prepare arrays of pointers on buffers and shapes
std::vector<void*> hOutBuffers(numOfSubArrs);
for(int i = 0; i < numOfSubArrs; ++i)
hOutBuffers[i] = outArrs[i]->getSpecialBuffer();
PointersManager manager(context, "helpers::split");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
BUILD_SINGLE_SELECTOR(input.dataType(), splitCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), dOutBuffers, outArrs[0]->specialShapeInfo(), axis), LIBND4J_TYPES);
manager.synchronize();
// }
for(int i = 0; i < numOfSubArrs; ++i)
outArrs[i]->tickWriteDevice();
input.tickReadDevice();
}
}
}
}
|
683974f7126d0bcfdb48c784806bed9f2b037637.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void splitCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
const T* x = reinterpret_cast<const T*>(vx);
__shared__ Nd4jLong xLen, totalThreads;
__shared__ int xRank, zDim;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
zDim = shape::shapeOf(zTadShapeInfo)[axis]; // same for all output arrays
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[MAX_RANK];
for (uint64_t i = tid; i < xLen; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto xOffset = shape::getOffset(xShapeInfo, coords);
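// coords[axis] / zDim selects which output sub-array receives this element; the modulo below remaps the coordinate into that sub-array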
auto *z = reinterpret_cast<T*>(reinterpret_cast<void **>(pVz)[coords[axis] / zDim]);
coords[axis] %= zDim;
const auto zOffset = shape::getOffset(zTadShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void splitCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
splitCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz, zTadShapeInfo, axis);
}
BUILD_SINGLE_TEMPLATE(template void splitCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
void split(sd::LaunchContext* context, const NDArray& input, std::vector<NDArray*>& outArrs, const int axis) {
const int numOfSubArrs = outArrs.size();
const auto sizeofT = input.sizeOfT();
for(int i = 0; i < numOfSubArrs; ++i)
outArrs[i]->syncToDevice();
input.syncToDevice();
bool luckCase1 = ((axis == 0 && input.ordering() == 'c') || (axis == input.rankOf() - 1 && input.ordering() == 'f')) && input.ews() == 1;
if(luckCase1) {
for (uint i = 0; i < numOfSubArrs; ++i) {
luckCase1 &= outArrs[i]->ordering() == input.ordering() && outArrs[i]->ews() == 1;
if(!luckCase1)
break;
}
}
if(luckCase1) { // for example {1,10} + {2,10} + {3,10} = {6, 10} order c; or {10,1} + {10,2} + {10,3} = {10, 6} order f
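// fast path: the outputs partition the input buffer contiguously, so each output can be filled with a single device-to-device copy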
void* x = static_cast<int8_t*>(input.getSpecialBuffer());
for (uint i = 0; i < numOfSubArrs; ++i) {
const auto memAmountToCopy = outArrs[i]->lengthOf() * sizeofT;
cudaMemcpyAsync(static_cast<int8_t*>(outArrs[i]->getSpecialBuffer()), x, memAmountToCopy, cudaMemcpyDeviceToDevice, *context->getCudaStream());
x = static_cast<int8_t*>(x) + memAmountToCopy;
}
if(cudaStreamSynchronize(*context->getCudaStream()) != 0)
throw std::runtime_error("split cuda: luckCase1 failed!");
for(int i = 0; i < numOfSubArrs; ++i)
outArrs[i]->tickWriteDevice();
input.tickReadDevice();
return;
}
// const bool isXcontin = input.strideAt(axis) == 1;
// bool areOutputsContin = true;
// bool allSameOrder = true;
// std::vector<Nd4jLong> strideOfContigStride(outArrs.size());
// if(isXcontin) {
// for (uint i = 0; i < outArrs.size(); ++i) {
// areOutputsContin &= outArrs[i]->strideAt(axis) == 1;
// allSameOrder &= input.ordering() == outArrs[i]->ordering();
// if(!areOutputsContin || !allSameOrder)
// break;
// strideOfContigStride[i] = shape::strideOverContigAxis(axis, outArrs[i]->getShapeInfo());
// }
// }
// const bool luckCase2 = isXcontin && areOutputsContin && allSameOrder;
// if(luckCase2) { // for example {2,1,3} + {2,5,3} + {2,10,3} = {2,16,3}, here axis 1 should have stride = 1 for all output arrays and the input array
// const auto xStep = shape::strideOverContigAxis(axis, input.getShapeInfo());
// const auto zDim = outArrs[0]->sizeAt(axis); // same for all outArrs
// for (uint i = 0; i < input.lengthOf() / input.sizeAt(axis); ++i) {
// const auto iShift = i * sizeofT;
// void* x = static_cast<int8_t*>(input.getSpecialBuffer()) + xStep * iShift;
// for (uint j = 0; j < numOfSubArrs; ++j) {
// void* z = static_cast<int8_t*>(outArrs[j]->getSpecialBuffer()) + strideOfContigStride[j] * iShift;
// const auto memSizeToCopy = zDim * sizeofT;
// cudaMemcpyAsync(z, x, memSizeToCopy, cudaMemcpyDeviceToDevice, *context->getCudaStream());
// x = static_cast<int8_t*>(x) + memSizeToCopy;
// }
// }
// if(cudaStreamSynchronize(*context->getCudaStream()) != 0)
// throw std::runtime_error("split cuda: luckCase2 failed!");
// }
// else { // general (slower) case
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// prepare arrays of pointers on buffers and shapes
std::vector<void*> hOutBuffers(numOfSubArrs);
for(int i = 0; i < numOfSubArrs; ++i)
hOutBuffers[i] = outArrs[i]->getSpecialBuffer();
PointersManager manager(context, "helpers::split");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
BUILD_SINGLE_SELECTOR(input.dataType(), splitCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), dOutBuffers, outArrs[0]->specialShapeInfo(), axis), LIBND4J_TYPES);
manager.synchronize();
// }
for(int i = 0; i < numOfSubArrs; ++i)
outArrs[i]->tickWriteDevice();
input.tickReadDevice();
}
}
}
}
|
dca2c042cea61e3dd8e52ee72f0d5dcbafaeae09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv.hpp>
__global__ void rotate(char *d_frame_in, char *d_frame_out, int height, int width)
{
// width is the row stride in bytes (widthStep); each pixel occupies 3 bytes (BGR)
int x = threadIdx.x + blockIdx.x * blockDim.x;
if (3*x >= width) return; // skip threads beyond the last pixel of a row
for(int y=0; y<height; y++)
{
for(int z=0; z<3; z++){
// horizontal mirror: read from the (width/3 - 1 - x)-th pixel of the same row
d_frame_out[y*width+3*x+z] = d_frame_in[y*width + 3*(width/3 - 1 - x) + z];
}
}
}
int main()
{
IplImage *Image1 = cvLoadImage("lena.jpg", 1);
if(Image1 == NULL) return 0;
IplImage *Image2 = cvCreateImage(cvSize(Image1->width, Image1->height), IPL_DEPTH_8U, 3);
cvNamedWindow("readImage", CV_WINDOW_AUTOSIZE);
cvNamedWindow("newImage", CV_WINDOW_AUTOSIZE);
char *frame = (char*)calloc(Image1->imageSize,sizeof(char));
char *dis = (char*)calloc(Image1->imageSize,sizeof(char));
for(int y=0; y<Image1->height; y++)
{
for(int x=0; x<Image1->width; x++)
{
for(int z=0; z<3; z++) {
frame[y*Image1->widthStep+3*x+z] = Image1->imageData[y*Image1->widthStep+3*x+z];
}
}
}
char *d_frame_in;
char *d_frame_out;
hipMalloc((void**)&d_frame_in, sizeof(char)*(Image1->imageSize));
hipMalloc((void**)&d_frame_out, sizeof(char)*(Image1->imageSize));
hipMemcpy(d_frame_in, frame, sizeof(char)*(Image1->imageSize), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( rotate), dim3(16),dim3(64), 0, 0, d_frame_in, d_frame_out, Image1->height, Image1->widthStep);
hipMemcpy(dis, d_frame_out, sizeof(char)*(Image1->imageSize), hipMemcpyDeviceToHost);
for(int y=0; y<Image1->height; y++)
{
for(int x=0; x<Image1->width; x++)
{
for(int z=0;z<3;z++){
Image2->imageData[y*Image1->widthStep+3*x+z] = dis[y*Image1->widthStep+3*x+z];
}
}
}
cvShowImage("readImage", Image1);
cvShowImage("newImage", Image2);
cvWaitKey(0);
free(frame);
free(dis);
hipFree(d_frame_in);
hipFree(d_frame_out);
cvDestroyWindow("readImage");
cvDestroyWindow("newImage");
}
|
dca2c042cea61e3dd8e52ee72f0d5dcbafaeae09.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv.hpp>
__global__ void rotate(char *d_frame_in, char *d_frame_out, int height, int width)
{
// width is the row stride in bytes (widthStep); each pixel occupies 3 bytes (BGR)
int x = threadIdx.x + blockIdx.x * blockDim.x;
if (3*x >= width) return; // skip threads beyond the last pixel of a row
for(int y=0; y<height; y++)
{
for(int z=0; z<3; z++){
// horizontal mirror: read from the (width/3 - 1 - x)-th pixel of the same row
d_frame_out[y*width+3*x+z] = d_frame_in[y*width + 3*(width/3 - 1 - x) + z];
}
}
}
int main()
{
IplImage *Image1 = cvLoadImage("lena.jpg", 1);
if(Image1 == NULL) return 0;
IplImage *Image2 = cvCreateImage(cvSize(Image1->width, Image1->height), IPL_DEPTH_8U, 3);
cvNamedWindow("readImage", CV_WINDOW_AUTOSIZE);
cvNamedWindow("newImage", CV_WINDOW_AUTOSIZE);
char *frame = (char*)calloc(Image1->imageSize,sizeof(char));
char *dis = (char*)calloc(Image1->imageSize,sizeof(char));
for(int y=0; y<Image1->height; y++)
{
for(int x=0; x<Image1->width; x++)
{
for(int z=0; z<3; z++) {
frame[y*Image1->widthStep+3*x+z] = Image1->imageData[y*Image1->widthStep+3*x+z];
}
}
}
char *d_frame_in;
char *d_frame_out;
cudaMalloc((void**)&d_frame_in, sizeof(char)*(Image1->imageSize));
cudaMalloc((void**)&d_frame_out, sizeof(char)*(Image1->imageSize));
cudaMemcpy(d_frame_in, frame, sizeof(char)*(Image1->imageSize), cudaMemcpyHostToDevice);
rotate<<<16,64>>>(d_frame_in, d_frame_out, Image1->height, Image1->widthStep);
cudaMemcpy(dis, d_frame_out, sizeof(char)*(Image1->imageSize), cudaMemcpyDeviceToHost);
for(int y=0; y<Image1->height; y++)
{
for(int x=0; x<Image1->width; x++)
{
for(int z=0;z<3;z++){
Image2->imageData[y*Image1->widthStep+3*x+z] = dis[y*Image1->widthStep+3*x+z];
}
}
}
cvShowImage("readImage", Image1);
cvShowImage("newImage", Image2);
cvWaitKey(0);
free(frame);
free(dis);
cudaFree(d_frame_in);
cudaFree(d_frame_out);
cvDestroyWindow("readImage");
cvDestroyWindow("newImage");
}
|
3b54df7e8e368a716f9b84000495451f16d5e909.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include "kernel_hip.cuh"
__global__ void mc_kernel_call(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned tid = threadIdx.x; // thread id within the block
const unsigned bid = blockIdx.x; // block id
const unsigned bsz = blockDim.x; // block size
int s_idx = tid + bid * bsz;
int n_idx = tid + bid * bsz;
float s_curr = S0;
if (s_idx < N_PATHS) {
int n = 0;
do {
s_curr = s_curr + mu*s_curr*dt + sigma*s_curr*d_normals[n_idx];
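// Euler-Maruyama step of geometric Brownian motion; d_normals is expected to hold the Brownian increments for each step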
n_idx++;
n++;
} while (n < N_STEPS);
double payoff = (s_curr>K ? s_curr - K : 0.0);
__syncthreads(); // wait for all threads to finish before moving on to the next simulation
d_s[s_idx] = exp(-r*T) * payoff;
}
}
__global__ void mc_kernel_put(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned tid = threadIdx.x;
const unsigned bid = blockIdx.x;
const unsigned bsz = blockDim.x;
int s_idx = tid + bid * bsz;
int n_idx = tid + bid * bsz;
float s_curr = S0;
if (s_idx < N_PATHS) {
int n = 0;
do {
s_curr = s_curr + mu*s_curr*dt + sigma*s_curr*d_normals[n_idx];
n_idx++;
n++;
} while (n < N_STEPS);
double payoff = (s_curr<K ? K - s_curr : 0.0);
__syncthreads();
d_s[s_idx] = exp(-r*T) * payoff;
}
}
// wrapper for a call option
void mc_call_GPU(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned BLOCK_SIZE = 1024; // use 1024 threads per block
const unsigned GRID_SIZE = ceil(float(N_PATHS) / float(BLOCK_SIZE)); // number of blocks needed to cover N_PATHS
hipLaunchKernelGGL(( mc_kernel_call) , dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_s, T, K, S0, sigma, mu, r, dt, d_normals, N_STEPS, N_PATHS); // launch the parallel kernel that simulates the underlying asset price
}
// wrapper for a put option
void mc_put_GPU(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned BLOCK_SIZE = 1024;
const unsigned GRID_SIZE = ceil(float(N_PATHS) / float(BLOCK_SIZE));
hipLaunchKernelGGL(( mc_kernel_put ), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_s, T, K, S0, sigma, mu, r, dt, d_normals, N_STEPS, N_PATHS);
}
|
3b54df7e8e368a716f9b84000495451f16d5e909.cu
|
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include "kernel.cuh"
__global__ void mc_kernel_call(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned tid = threadIdx.x; // thread id within the block
const unsigned bid = blockIdx.x; // block id
const unsigned bsz = blockDim.x; // block size
int s_idx = tid + bid * bsz;
int n_idx = tid + bid * bsz;
float s_curr = S0;
if (s_idx < N_PATHS) {
int n = 0;
do {
s_curr = s_curr + mu*s_curr*dt + sigma*s_curr*d_normals[n_idx];
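// Euler-Maruyama step of geometric Brownian motion; d_normals is expected to hold the Brownian increments for each step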
n_idx++;
n++;
} while (n < N_STEPS);
double payoff = (s_curr>K ? s_curr - K : 0.0);
__syncthreads(); // wait for all threads to finish before moving on to the next simulation
d_s[s_idx] = exp(-r*T) * payoff;
}
}
__global__ void mc_kernel_put(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned tid = threadIdx.x;
const unsigned bid = blockIdx.x;
const unsigned bsz = blockDim.x;
int s_idx = tid + bid * bsz;
int n_idx = tid + bid * bsz;
float s_curr = S0;
if (s_idx < N_PATHS) {
int n = 0;
do {
s_curr = s_curr + mu*s_curr*dt + sigma*s_curr*d_normals[n_idx];
n_idx++;
n++;
} while (n < N_STEPS);
double payoff = (s_curr<K ? K - s_curr : 0.0);
__syncthreads();
d_s[s_idx] = exp(-r*T) * payoff;
}
}
// wrapper for a call option
void mc_call_GPU(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned BLOCK_SIZE = 1024; // use 1024 threads per block
const unsigned GRID_SIZE = ceil(float(N_PATHS) / float(BLOCK_SIZE)); // number of blocks needed to cover N_PATHS
mc_kernel_call <<<GRID_SIZE, BLOCK_SIZE >>>(d_s, T, K, S0, sigma, mu, r, dt, d_normals, N_STEPS, N_PATHS); // launch the parallel kernel that simulates the underlying asset price
}
// wrapper for a put option
void mc_put_GPU(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned BLOCK_SIZE = 1024;
const unsigned GRID_SIZE = ceil(float(N_PATHS) / float(BLOCK_SIZE));
mc_kernel_put <<<GRID_SIZE, BLOCK_SIZE >>>(d_s, T, K, S0, sigma, mu, r, dt, d_normals, N_STEPS, N_PATHS);
}
|
7de5c59e83d143a7f91e60d3d2f389e188f3ce63.hip
|
// !!! This is a file automatically generated by hipify!!!
//==============================================================================================//
#include <hip/hip_runtime.h>
#include "../Utils/cudaUtil.h"
#include "CUDABasedRasterizationInput.h"
#include "../Utils/CameraUtil.h"
#include "../Utils/IndexHelper.h"
#include "../Utils/cuda_SimpleMatrixUtil.h"
#include "../Utils/RendererUtil.h"
#ifndef FLT_MAX
#define FLT_MAX 1000000
#endif
//==============================================================================================//
//Render buffers
//==============================================================================================//
/*
Initializes camera data
*/
__global__ void initializeCamerasDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < 1)
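// a single thread precomputes the inverse extrinsics and inverse projection matrices for every camera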
{
for (int idc = 0; idc < input.numberOfCameras; idc++)
{
float4x4 h_intrinsics;
float4x4 h_extrinsics;
h_extrinsics.setIdentity();
h_intrinsics.setIdentity();
for (int row = 0; row < 3; row++)
{
h_intrinsics(row, 0) = input.d_cameraIntrinsics[3 * idc + row].x;
h_intrinsics(row, 1) = input.d_cameraIntrinsics[3 * idc + row].y;
h_intrinsics(row, 2) = input.d_cameraIntrinsics[3 * idc + row].z;
h_intrinsics(row, 3) = 0.f;
h_extrinsics(row, 0) = input.d_cameraExtrinsics[3 * idc + row].x;
h_extrinsics(row, 1) = input.d_cameraExtrinsics[3 * idc + row].y;
h_extrinsics(row, 2) = input.d_cameraExtrinsics[3 * idc + row].z;
h_extrinsics(row, 3) = input.d_cameraExtrinsics[3 * idc + row].w;
}
float4x4 h_inExtrinsics = h_extrinsics.getInverse();
float4x4 h_invProjection = (h_intrinsics * h_extrinsics).getInverse();
for (int row = 0; row < 4; row++)
{
input.d_inverseExtrinsics[4 * idc + row].x = h_inExtrinsics(row, 0);
input.d_inverseExtrinsics[4 * idc + row].y = h_inExtrinsics(row, 1);
input.d_inverseExtrinsics[4 * idc + row].z = h_inExtrinsics(row, 2);
input.d_inverseExtrinsics[4 * idc + row].w = h_inExtrinsics(row, 3);
input.d_inverseProjection[4 * idc + row].x = h_invProjection(row, 0);
input.d_inverseProjection[4 * idc + row].y = h_invProjection(row, 1);
input.d_inverseProjection[4 * idc + row].z = h_invProjection(row, 2);
input.d_inverseProjection[4 * idc + row].w = h_invProjection(row, 3);
}
}
}
}
//==============================================================================================//
/*
Initializes all arrays
*/
__global__ void initializeDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<input.w*input.h*input.numberOfCameras)
{
input.d_depthBuffer[idx] = INT_MAX;
input.d_faceIDBuffer[idx] = -1;
input.d_barycentricCoordinatesBuffer[2 * idx + 0] = 0.f;
input.d_barycentricCoordinatesBuffer[2 * idx + 1] = 0.f;
input.d_renderBuffer[3 * idx + 0] = 0.f;
input.d_renderBuffer[3 * idx + 1] = 1.f;
input.d_renderBuffer[3 * idx + 2] = 0.f;
}
}
//==============================================================================================//
/*
Project the vertices into the image plane and store depth value
*/
__global__ void projectVerticesDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.N)
{
int2 index = index1DTo2D(input.numberOfCameras, input.N, idx);
int idc = index.x;
int idv = index.y;
float3 v0 = input.d_vertices[idv];
float3 c_v0 = getCamSpacePoint(&input.d_cameraExtrinsics[3 * idc], v0);
float3 i_v0 = projectPointFloat3(&input.d_cameraIntrinsics[3 * idc], c_v0);
input.d_projectedVertices[idx] = i_v0;
}
}
//==============================================================================================//
/*
Computes the face normals
*/
__global__ void renderFaceNormalDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 v0 = input.d_vertices[indexv0];
float3 v1 = input.d_vertices[indexv1];
float3 v2 = input.d_vertices[indexv2];
input.d_faceNormal[idx] = cross(v1 - v0, v2 - v0);
}
}
//==============================================================================================//
/*
Computes the vertex normals
*/
__global__ void renderVertexNormalDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.N)
{
int2 index = index1DTo2D(input.numberOfCameras, input.N, idx);
int idv = index.y;
int2 verFaceId = input.d_vertexFacesId[idv];
float3 vertNorm;
for (int i = verFaceId.x; i<verFaceId.x + verFaceId.y; i++)
{
int faceId = input.d_vertexFaces[i];
if (i == verFaceId.x)
vertNorm = input.d_faceNormal[faceId];
else
{
vertNorm.x = vertNorm.x + input.d_faceNormal[faceId].x;
vertNorm.y = vertNorm.y + input.d_faceNormal[faceId].y;
vertNorm.z = vertNorm.z + input.d_faceNormal[faceId].z;
}
}
input.d_vertexNormal[idx] = vertNorm;
}
}
//==============================================================================================//
/*
Computes, from the already projected vertices, the 2D bounding box per triangle
in the image plane, clamped to the image borders
*/
__global__ void projectFacesDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idc = index.x;
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 i_v0 = input.d_projectedVertices[idc* input.N + indexv0];
float3 i_v1 = input.d_projectedVertices[idc* input.N + indexv1];
float3 i_v2 = input.d_projectedVertices[idc* input.N + indexv2];
input.d_BBoxes[idx].x = fmaxf(fminf(i_v0.x, fminf(i_v1.x, i_v2.x)) - 0.5f, 0); //minx
input.d_BBoxes[idx].y = fmaxf(fminf(i_v0.y, fminf(i_v1.y, i_v2.y)) - 0.5f, 0); //miny
input.d_BBoxes[idx].z = fminf(fmaxf(i_v0.x, fmaxf(i_v1.x, i_v2.x)) + 0.5f, input.w - 1); //maxx
input.d_BBoxes[idx].w = fminf(fmaxf(i_v0.y, fmaxf(i_v1.y, i_v2.y)) + 0.5f, input.h - 1); //maxy
}
}
//==============================================================================================//
/*
Render the depth buffer (closest fragment per pixel via atomicMin on quantized depth)
*/
__global__ void renderDepthBufferDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idc = index.x;
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 vertex0 = input.d_projectedVertices[input.N*idc + indexv0];
float3 vertex1 = input.d_projectedVertices[input.N*idc + indexv1];
float3 vertex2 = input.d_projectedVertices[input.N*idc + indexv2];
for (int u = input.d_BBoxes[idx].x; u <= input.d_BBoxes[idx].z; u++)
{
for (int v = input.d_BBoxes[idx].y; v <= input.d_BBoxes[idx].w; v++)
{
float2 pixelCenter1 = make_float2(u + 0.5f, v + 0.5f);
float3 abc = uv2barycentric(pixelCenter1.x, pixelCenter1.y, input.d_vertices[indexv0], input.d_vertices[indexv1], input.d_vertices[indexv2], input.d_inverseExtrinsics + idc * 4, input.d_inverseProjection + idc * 4);
float z = FLT_MAX;
bool isInsideTriangle = (abc.x >= -0.001f) && (abc.y >= -0.001f) && (abc.z >= -0.001f) && (abc.x <= 1.001f) && (abc.y <= 1.001f) && (abc.z <= 1.001f);
if (isInsideTriangle)
{
z = 1.f / (abc.x / vertex0.z + abc.y / vertex1.z + abc.z / vertex2.z); //Perspective-Correct Interpolation
z *= 10000.f;
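// quantize depth to a fixed-point integer so the closest fragment per pixel can be selected with atomicMin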
int pixelId = idc* input.w* input.h + input.w * v + u;
atomicMin(&input.d_depthBuffer[pixelId], z);
}
}
}
}
}
//==============================================================================================//
/*
Render the faceId and barycentricCoordinates buffers
*/
__global__ void renderBuffersDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idc = index.x;
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 vertex0 = input.d_projectedVertices[input.N*idc + indexv0];
float3 vertex1 = input.d_projectedVertices[input.N*idc + indexv1];
float3 vertex2 = input.d_projectedVertices[input.N*idc + indexv2];
for (int u = input.d_BBoxes[idx].x; u <= input.d_BBoxes[idx].z; u++)
{
for (int v = input.d_BBoxes[idx].y; v <= input.d_BBoxes[idx].w; v++)
{
float2 pixelCenter1 = make_float2(u + 0.5f, v + 0.5f);
float3 abc = uv2barycentric(pixelCenter1.x, pixelCenter1.y, input.d_vertices[indexv0], input.d_vertices[indexv1], input.d_vertices[indexv2], input.d_inverseExtrinsics + idc * 4, input.d_inverseProjection + idc * 4);
bool isInsideTriangle = (abc.x >= -0.001f) && (abc.y >= -0.001f) && (abc.z >= -0.001f) && (abc.x <= 1.001f) && (abc.y <= 1.001f) && (abc.z <= 1.001f);
float z = 1.f / (abc.x / vertex0.z + abc.y / vertex1.z + abc.z / vertex2.z); //Perspective-Correct Interpolation
z *= 10000.f;
int pixelId = idc* input.w* input.h + input.w * v + u;
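// only the fragment whose quantized depth matches the depth buffer (i.e. the closest one) writes its attributes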
if (isInsideTriangle && (int)z == input.d_depthBuffer[pixelId])
{
int pixelId1 = 2 * idc* input.w * input.h + 2 * input.w * v + 2 * u;
int pixelId2 = 3 * idc* input.w * input.h + 3 * input.w * v + 3 * u;
//face buffer
input.d_faceIDBuffer[pixelId] = idf;
//barycentric buffer
input.d_barycentricCoordinatesBuffer[pixelId1 + 0] = abc.x;
input.d_barycentricCoordinatesBuffer[pixelId1 + 1] = abc.y;
//get pix normal
float3 v0_norm = input.d_vertexNormal[input.N*idc + indexv0];
float3 v1_norm = input.d_vertexNormal[input.N*idc + indexv1];
float3 v2_norm = input.d_vertexNormal[input.N*idc + indexv2];
float3 pixNorm = v0_norm * abc.x + v1_norm * abc.y + v2_norm * abc.z;
pixNorm = pixNorm / length(pixNorm);
//get normal flip
float3 o = make_float3(0.f, 0.f, 0.f);
float3 d = make_float3(0.f, 0.f, 0.f);
getRayCuda2(pixelCenter1, o, d, input.d_inverseExtrinsics + idc * 4, input.d_inverseProjection + idc * 4);
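// flip the interpolated normal so it faces the camera (points against the viewing ray d)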
if (dot(pixNorm, d) > 0.f)
pixNorm = -pixNorm;
float3 color = make_float3(0.f,0.f,0.f);
//albedo
if (input.albedoMode == AlbedoMode::Textured)
{
float2 texCoord0 = make_float2(input.d_textureCoordinates[idf * 3 * 2 + 0 * 2 + 0], 1.f - input.d_textureCoordinates[idf * 3 * 2 + 0 * 2 + 1]);
float2 texCoord1 = make_float2(input.d_textureCoordinates[idf * 3 * 2 + 1 * 2 + 0], 1.f - input.d_textureCoordinates[idf * 3 * 2 + 1 * 2 + 1]);
float2 texCoord2 = make_float2(input.d_textureCoordinates[idf * 3 * 2 + 2 * 2 + 0], 1.f - input.d_textureCoordinates[idf * 3 * 2 + 2 * 2 + 1]);
float2 finalTexCoord = texCoord0* abc.x + texCoord1* abc.y + texCoord2* abc.z;
finalTexCoord.x = finalTexCoord.x * input.texWidth;
finalTexCoord.y = finalTexCoord.y * input.texHeight;
finalTexCoord.x = fmaxf(finalTexCoord.x, 0);
finalTexCoord.x = fminf(finalTexCoord.x, input.texWidth - 1);
finalTexCoord.y = fmaxf(finalTexCoord.y, 0);
finalTexCoord.y = fminf(finalTexCoord.y, input.texHeight - 1);
float U0 = finalTexCoord.x;
float V0 = finalTexCoord.y;
float LU = int(finalTexCoord.x - 0.5f) + 0.5f;
float HU = int(finalTexCoord.x - 0.5f) + 1.5f;
float LV = int(finalTexCoord.y - 0.5f) + 0.5f;
float HV = int(finalTexCoord.y - 0.5f) + 1.5f;
float3 colorLULV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)LU + 0],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)LU + 1],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)LU + 2]);
float3 colorLUHV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)LU + 0],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)LU + 1],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)LU + 2]);
float3 colorHULV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)HU + 0],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)HU + 1],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)HU + 2]);
float3 colorHUHV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)HU + 0],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)HU + 1],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)HU + 2]);
float weightLULV = (V0 - LV) * (U0 - LU);
float weightLUHV = (HV - V0) * (U0 - LU);
float weightHULV = (V0 - LV) * (HU - U0);
float weightHUHV = (HV - V0) * (HU - U0);
//printf("%f", weightLULV + weightLUHV + weightHULV + weightHUHV);
//color = weightLULV * colorLULV + weightHULV * colorHULV + weightLUHV * colorLUHV + weightHUHV * colorHUHV;
color = colorLULV;
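// note: the bilinear blend above is left commented out, so the texel at (LU, LV) is used directly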
}
else if (input.albedoMode == AlbedoMode::VertexColor)
{
color = make_float3(
input.d_vertexColor[indexv0].x * abc.x + input.d_vertexColor[indexv1].x * abc.y + input.d_vertexColor[indexv2].x * abc.z,
input.d_vertexColor[indexv0].y * abc.x + input.d_vertexColor[indexv1].y * abc.y + input.d_vertexColor[indexv2].y * abc.z,
input.d_vertexColor[indexv0].z * abc.x + input.d_vertexColor[indexv1].z * abc.y + input.d_vertexColor[indexv2].z * abc.z);
}
else if (input.albedoMode == AlbedoMode::Normal)
{
color = make_float3((1.f + pixNorm.x) / 2.f, (1.f + pixNorm.y) / 2.f, (1.f + pixNorm.z) / 2.f);
}
else if (input.albedoMode == AlbedoMode::Lighting)
{
color = make_float3(1.f, 1.f, 1.f);
}
else if (input.albedoMode == AlbedoMode::ForegroundMask)
{
color = make_float3(1.f, 1.f, 1.f);
}
//shading
if ((input.shadingMode == ShadingMode::Shaded && (input.albedoMode != AlbedoMode::Normal)) || input.albedoMode == AlbedoMode::Lighting)
{
color = getShading(color, pixNorm, input.d_shCoeff + (idc * 27));
}
input.d_renderBuffer[pixelId2 + 0] = color.x;
input.d_renderBuffer[pixelId2 + 1] = color.y;
input.d_renderBuffer[pixelId2 + 2] = color.z;
}
}
}
}
}
//==============================================================================================//
/*
Render the normal map buffers
*/
__global__ void renderNormalMapDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.texHeight * input.texWidth)
{
int2 index = index1DTo2D(input.texHeight, input.texWidth, idx);
int pixV = index.x;
int pixU = index.y;
float4 pixInfo = input.d_textureMapIds[pixV * input.texWidth + pixU];
int idf = pixInfo.x;
float3 abc = make_float3(pixInfo.y, pixInfo.z, pixInfo.w);
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
//get pix normal
float3 v0_norm = input.d_vertexNormal[indexv0];
float3 v1_norm = input.d_vertexNormal[indexv1];
float3 v2_norm = input.d_vertexNormal[indexv2];
float3 pixNorm = v0_norm * abc.x + v1_norm * abc.y + v2_norm * abc.z;
if (length(pixNorm) != 0.f)
pixNorm = pixNorm / length(pixNorm);
pixNorm = (pixNorm + make_float3(1.f, 1.f, 1.f)) / 2.f;
input.d_normalMap[pixV * input.texWidth + pixU] = pixNorm;
}
}
//==============================================================================================//
extern "C" void renderBuffersGPU(CUDABasedRasterizationInput& input)
{
initializeCamerasDevice << < 1, 1 >> > (input);
initializeDevice << <(input.w*input.h*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> > (input);
projectVerticesDevice << <(input.N*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
projectFacesDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
renderFaceNormalDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
renderVertexNormalDevice << <(input.N*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
if (input.computeNormal)
{
renderNormalMapDevice << <(input.texWidth*input.texHeight + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> > (input);
}
else
{
renderDepthBufferDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
renderBuffersDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> > (input);
}
}
|
7de5c59e83d143a7f91e60d3d2f389e188f3ce63.cu
|
//==============================================================================================//
#include <cuda_runtime.h>
#include "../Utils/cudaUtil.h"
#include "CUDABasedRasterizationInput.h"
#include "../Utils/CameraUtil.h"
#include "../Utils/IndexHelper.h"
#include "../Utils/cuda_SimpleMatrixUtil.h"
#include "../Utils/RendererUtil.h"
#ifndef FLT_MAX
#define FLT_MAX 1000000
#endif
//==============================================================================================//
//Render buffers
//==============================================================================================//
/*
Initializes camera data
*/
__global__ void initializeCamerasDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < 1)
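// a single thread precomputes the inverse extrinsics and inverse projection matrices for every camera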
{
for (int idc = 0; idc < input.numberOfCameras; idc++)
{
float4x4 h_intrinsics;
float4x4 h_extrinsics;
h_extrinsics.setIdentity();
h_intrinsics.setIdentity();
for (int row = 0; row < 3; row++)
{
h_intrinsics(row, 0) = input.d_cameraIntrinsics[3 * idc + row].x;
h_intrinsics(row, 1) = input.d_cameraIntrinsics[3 * idc + row].y;
h_intrinsics(row, 2) = input.d_cameraIntrinsics[3 * idc + row].z;
h_intrinsics(row, 3) = 0.f;
h_extrinsics(row, 0) = input.d_cameraExtrinsics[3 * idc + row].x;
h_extrinsics(row, 1) = input.d_cameraExtrinsics[3 * idc + row].y;
h_extrinsics(row, 2) = input.d_cameraExtrinsics[3 * idc + row].z;
h_extrinsics(row, 3) = input.d_cameraExtrinsics[3 * idc + row].w;
}
float4x4 h_inExtrinsics = h_extrinsics.getInverse();
float4x4 h_invProjection = (h_intrinsics * h_extrinsics).getInverse();
for (int row = 0; row < 4; row++)
{
input.d_inverseExtrinsics[4 * idc + row].x = h_inExtrinsics(row, 0);
input.d_inverseExtrinsics[4 * idc + row].y = h_inExtrinsics(row, 1);
input.d_inverseExtrinsics[4 * idc + row].z = h_inExtrinsics(row, 2);
input.d_inverseExtrinsics[4 * idc + row].w = h_inExtrinsics(row, 3);
input.d_inverseProjection[4 * idc + row].x = h_invProjection(row, 0);
input.d_inverseProjection[4 * idc + row].y = h_invProjection(row, 1);
input.d_inverseProjection[4 * idc + row].z = h_invProjection(row, 2);
input.d_inverseProjection[4 * idc + row].w = h_invProjection(row, 3);
}
}
}
}
//==============================================================================================//
/*
Initializes all arrays
*/
__global__ void initializeDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<input.w*input.h*input.numberOfCameras)
{
input.d_depthBuffer[idx] = INT_MAX;
input.d_faceIDBuffer[idx] = -1;
input.d_barycentricCoordinatesBuffer[2 * idx + 0] = 0.f;
input.d_barycentricCoordinatesBuffer[2 * idx + 1] = 0.f;
input.d_renderBuffer[3 * idx + 0] = 0.f;
input.d_renderBuffer[3 * idx + 1] = 1.f;
input.d_renderBuffer[3 * idx + 2] = 0.f;
}
}
//==============================================================================================//
/*
Project the vertices into the image plane and store depth value
*/
__global__ void projectVerticesDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.N)
{
int2 index = index1DTo2D(input.numberOfCameras, input.N, idx);
int idc = index.x;
int idv = index.y;
float3 v0 = input.d_vertices[idv];
float3 c_v0 = getCamSpacePoint(&input.d_cameraExtrinsics[3 * idc], v0);
float3 i_v0 = projectPointFloat3(&input.d_cameraIntrinsics[3 * idc], c_v0);
input.d_projectedVertices[idx] = i_v0;
}
}
//==============================================================================================//
/*
Computes the face normals
*/
__global__ void renderFaceNormalDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 v0 = input.d_vertices[indexv0];
float3 v1 = input.d_vertices[indexv1];
float3 v2 = input.d_vertices[indexv2];
input.d_faceNormal[idx] = cross(v1 - v0, v2 - v0);
}
}
//==============================================================================================//
/*
Computes the vertex normals
*/
__global__ void renderVertexNormalDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.N)
{
int2 index = index1DTo2D(input.numberOfCameras, input.N, idx);
int idv = index.y;
int2 verFaceId = input.d_vertexFacesId[idv];
float3 vertNorm;
for (int i = verFaceId.x; i<verFaceId.x + verFaceId.y; i++)
{
int faceId = input.d_vertexFaces[i];
if (i == verFaceId.x)
vertNorm = input.d_faceNormal[faceId];
else
{
vertNorm.x = vertNorm.x + input.d_faceNormal[faceId].x;
vertNorm.y = vertNorm.y + input.d_faceNormal[faceId].y;
vertNorm.z = vertNorm.z + input.d_faceNormal[faceId].z;
}
}
input.d_vertexNormal[idx] = vertNorm;
}
}
//==============================================================================================//
/*
Computes the 2D bounding box of every projected triangle in the image plane
and clamps it to the image boundaries
*/
__global__ void projectFacesDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idc = index.x;
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 i_v0 = input.d_projectedVertices[idc* input.N + indexv0];
float3 i_v1 = input.d_projectedVertices[idc* input.N + indexv1];
float3 i_v2 = input.d_projectedVertices[idc* input.N + indexv2];
input.d_BBoxes[idx].x = fmaxf(fminf(i_v0.x, fminf(i_v1.x, i_v2.x)) - 0.5f, 0); //minx
input.d_BBoxes[idx].y = fmaxf(fminf(i_v0.y, fminf(i_v1.y, i_v2.y)) - 0.5f, 0); //miny
input.d_BBoxes[idx].z = fminf(fmaxf(i_v0.x, fmaxf(i_v1.x, i_v2.x)) + 0.5f, input.w - 1); //maxx
input.d_BBoxes[idx].w = fminf(fmaxf(i_v0.y, fmaxf(i_v1.y, i_v2.y)) + 0.5f, input.h - 1); //maxy
}
}
//==============================================================================================//
/*
Render the depth buffer (closest fixed-point depth per pixel via atomicMin)
*/
__global__ void renderDepthBufferDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idc = index.x;
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 vertex0 = input.d_projectedVertices[input.N*idc + indexv0];
float3 vertex1 = input.d_projectedVertices[input.N*idc + indexv1];
float3 vertex2 = input.d_projectedVertices[input.N*idc + indexv2];
for (int u = input.d_BBoxes[idx].x; u <= input.d_BBoxes[idx].z; u++)
{
for (int v = input.d_BBoxes[idx].y; v <= input.d_BBoxes[idx].w; v++)
{
float2 pixelCenter1 = make_float2(u + 0.5f, v + 0.5f);
float3 abc = uv2barycentric(pixelCenter1.x, pixelCenter1.y, input.d_vertices[indexv0], input.d_vertices[indexv1], input.d_vertices[indexv2], input.d_inverseExtrinsics + idc * 4, input.d_inverseProjection + idc * 4);
float z = FLT_MAX;
bool isInsideTriangle = (abc.x >= -0.001f) && (abc.y >= -0.001f) && (abc.z >= -0.001f) && (abc.x <= 1.001f) && (abc.y <= 1.001f) && (abc.z <= 1.001f);
if (isInsideTriangle)
{
z = 1.f / (abc.x / vertex0.z + abc.y / vertex1.z + abc.z / vertex2.z); //Perspective-Correct Interpolation
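					// Scale the interpolated depth to fixed point so it can be stored and compared with integer atomicMin.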
z *= 10000.f;
int pixelId = idc* input.w* input.h + input.w * v + u;
atomicMin(&input.d_depthBuffer[pixelId], z);
}
}
}
}
}
//==============================================================================================//
/*
Render the faceId, barycentric coordinate, and shaded color buffers
*/
__global__ void renderBuffersDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.numberOfCameras * input.F)
{
int2 index = index1DTo2D(input.numberOfCameras, input.F, idx);
int idc = index.x;
int idf = index.y;
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
float3 vertex0 = input.d_projectedVertices[input.N*idc + indexv0];
float3 vertex1 = input.d_projectedVertices[input.N*idc + indexv1];
float3 vertex2 = input.d_projectedVertices[input.N*idc + indexv2];
for (int u = input.d_BBoxes[idx].x; u <= input.d_BBoxes[idx].z; u++)
{
for (int v = input.d_BBoxes[idx].y; v <= input.d_BBoxes[idx].w; v++)
{
float2 pixelCenter1 = make_float2(u + 0.5f, v + 0.5f);
float3 abc = uv2barycentric(pixelCenter1.x, pixelCenter1.y, input.d_vertices[indexv0], input.d_vertices[indexv1], input.d_vertices[indexv2], input.d_inverseExtrinsics + idc * 4, input.d_inverseProjection + idc * 4);
bool isInsideTriangle = (abc.x >= -0.001f) && (abc.y >= -0.001f) && (abc.z >= -0.001f) && (abc.x <= 1.001f) && (abc.y <= 1.001f) && (abc.z <= 1.001f);
float z = 1.f / (abc.x / vertex0.z + abc.y / vertex1.z + abc.z / vertex2.z); //Perspective-Correct Interpolation
z *= 10000.f;
int pixelId = idc* input.w* input.h + input.w * v + u;
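				// Only the triangle that won the depth test in renderDepthBufferDevice writes its attributes for this pixel.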
if (isInsideTriangle && (int)z == input.d_depthBuffer[pixelId])
{
int pixelId1 = 2 * idc* input.w * input.h + 2 * input.w * v + 2 * u;
int pixelId2 = 3 * idc* input.w * input.h + 3 * input.w * v + 3 * u;
//face buffer
input.d_faceIDBuffer[pixelId] = idf;
//barycentric buffer
input.d_barycentricCoordinatesBuffer[pixelId1 + 0] = abc.x;
input.d_barycentricCoordinatesBuffer[pixelId1 + 1] = abc.y;
//get pix normal
float3 v0_norm = input.d_vertexNormal[input.N*idc + indexv0];
float3 v1_norm = input.d_vertexNormal[input.N*idc + indexv1];
float3 v2_norm = input.d_vertexNormal[input.N*idc + indexv2];
float3 pixNorm = v0_norm * abc.x + v1_norm * abc.y + v2_norm * abc.z;
pixNorm = pixNorm / length(pixNorm);
//get normal flip
float3 o = make_float3(0.f, 0.f, 0.f);
float3 d = make_float3(0.f, 0.f, 0.f);
getRayCuda2(pixelCenter1, o, d, input.d_inverseExtrinsics + idc * 4, input.d_inverseProjection + idc * 4);
if (dot(pixNorm, d) > 0.f)
pixNorm = -pixNorm;
float3 color = make_float3(0.f,0.f,0.f);
//albedo
if (input.albedoMode == AlbedoMode::Textured)
{
float2 texCoord0 = make_float2(input.d_textureCoordinates[idf * 3 * 2 + 0 * 2 + 0], 1.f - input.d_textureCoordinates[idf * 3 * 2 + 0 * 2 + 1]);
float2 texCoord1 = make_float2(input.d_textureCoordinates[idf * 3 * 2 + 1 * 2 + 0], 1.f - input.d_textureCoordinates[idf * 3 * 2 + 1 * 2 + 1]);
float2 texCoord2 = make_float2(input.d_textureCoordinates[idf * 3 * 2 + 2 * 2 + 0], 1.f - input.d_textureCoordinates[idf * 3 * 2 + 2 * 2 + 1]);
float2 finalTexCoord = texCoord0* abc.x + texCoord1* abc.y + texCoord2* abc.z;
finalTexCoord.x = finalTexCoord.x * input.texWidth;
finalTexCoord.y = finalTexCoord.y * input.texHeight;
finalTexCoord.x = fmaxf(finalTexCoord.x, 0);
finalTexCoord.x = fminf(finalTexCoord.x, input.texWidth - 1);
finalTexCoord.y = fmaxf(finalTexCoord.y, 0);
finalTexCoord.y = fminf(finalTexCoord.y, input.texHeight - 1);
float U0 = finalTexCoord.x;
float V0 = finalTexCoord.y;
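						// Texel centres of the four neighbouring samples for a bilinear lookup; the weighted
						// blend below is commented out, so the (LU, LV) sample is used directly.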
float LU = int(finalTexCoord.x - 0.5f) + 0.5f;
float HU = int(finalTexCoord.x - 0.5f) + 1.5f;
float LV = int(finalTexCoord.y - 0.5f) + 0.5f;
float HV = int(finalTexCoord.y - 0.5f) + 1.5f;
float3 colorLULV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)LU + 0],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)LU + 1],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)LU + 2]);
float3 colorLUHV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)LU + 0],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)LU + 1],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)LU + 2]);
float3 colorHULV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)HU + 0],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)HU + 1],
input.d_textureMap[3 * input.texWidth *(int)LV + 3 * (int)HU + 2]);
float3 colorHUHV = make_float3(
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)HU + 0],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)HU + 1],
input.d_textureMap[3 * input.texWidth *(int)HV + 3 * (int)HU + 2]);
float weightLULV = (V0 - LV) * (U0 - LU);
float weightLUHV = (HV - V0) * (U0 - LU);
float weightHULV = (V0 - LV) * (HU - U0);
float weightHUHV = (HV - V0) * (HU - U0);
//printf("%f", weightLULV + weightLUHV + weightHULV + weightHUHV);
//color = weightLULV * colorLULV + weightHULV * colorHULV + weightLUHV * colorLUHV + weightHUHV * colorHUHV;
color = colorLULV;
}
else if (input.albedoMode == AlbedoMode::VertexColor)
{
color = make_float3(
input.d_vertexColor[indexv0].x * abc.x + input.d_vertexColor[indexv1].x * abc.y + input.d_vertexColor[indexv2].x * abc.z,
input.d_vertexColor[indexv0].y * abc.x + input.d_vertexColor[indexv1].y * abc.y + input.d_vertexColor[indexv2].y * abc.z,
input.d_vertexColor[indexv0].z * abc.x + input.d_vertexColor[indexv1].z * abc.y + input.d_vertexColor[indexv2].z * abc.z);
}
else if (input.albedoMode == AlbedoMode::Normal)
{
color = make_float3((1.f + pixNorm.x) / 2.f, (1.f + pixNorm.y) / 2.f, (1.f + pixNorm.z) / 2.f);
}
else if (input.albedoMode == AlbedoMode::Lighting)
{
color = make_float3(1.f, 1.f, 1.f);
}
else if (input.albedoMode == AlbedoMode::ForegroundMask)
{
color = make_float3(1.f, 1.f, 1.f);
}
//shading
if ((input.shadingMode == ShadingMode::Shaded && (input.albedoMode != AlbedoMode::Normal)) || input.albedoMode == AlbedoMode::Lighting)
{
color = getShading(color, pixNorm, input.d_shCoeff + (idc * 27));
}
input.d_renderBuffer[pixelId2 + 0] = color.x;
input.d_renderBuffer[pixelId2 + 1] = color.y;
input.d_renderBuffer[pixelId2 + 2] = color.z;
}
}
}
}
}
//==============================================================================================//
/*
Render the normal map buffers
*/
__global__ void renderNormalMapDevice(CUDABasedRasterizationInput input)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < input.texHeight * input.texWidth)
{
int2 index = index1DTo2D(input.texHeight, input.texWidth, idx);
int pixV = index.x;
int pixU = index.y;
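		// d_textureMapIds stores, per texel, the covering face id (x) and its barycentric coordinates (y, z, w).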
float4 pixInfo = input.d_textureMapIds[pixV * input.texWidth + pixU];
int idf = pixInfo.x;
float3 abc = make_float3(pixInfo.y, pixInfo.z, pixInfo.w);
int indexv0 = input.d_facesVertex[idf].x;
int indexv1 = input.d_facesVertex[idf].y;
int indexv2 = input.d_facesVertex[idf].z;
//get pix normal
float3 v0_norm = input.d_vertexNormal[indexv0];
float3 v1_norm = input.d_vertexNormal[indexv1];
float3 v2_norm = input.d_vertexNormal[indexv2];
float3 pixNorm = v0_norm * abc.x + v1_norm * abc.y + v2_norm * abc.z;
if (length(pixNorm) != 0.f)
pixNorm = pixNorm / length(pixNorm);
pixNorm = (pixNorm + make_float3(1.f, 1.f, 1.f)) / 2.f;
input.d_normalMap[pixV * input.texWidth + pixU] = pixNorm;
}
}
//==============================================================================================//
extern "C" void renderBuffersGPU(CUDABasedRasterizationInput& input)
{
initializeCamerasDevice << < 1, 1 >> > (input);
initializeDevice << <(input.w*input.h*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> > (input);
projectVerticesDevice << <(input.N*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
projectFacesDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
renderFaceNormalDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
renderVertexNormalDevice << <(input.N*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
if (input.computeNormal)
{
renderNormalMapDevice << <(input.texWidth*input.texHeight + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> > (input);
}
else
{
renderDepthBufferDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> >(input);
renderBuffersDevice << <(input.F*input.numberOfCameras + THREADS_PER_BLOCK_CUDABASEDRASTERIZER - 1) / THREADS_PER_BLOCK_CUDABASEDRASTERIZER, THREADS_PER_BLOCK_CUDABASEDRASTERIZER >> > (input);
}
}
|
9bc76ed7a0d864995266993d002a2141c0eb9c78.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTable(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
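  // threadIdx.x walks the embedding dimension D; (blockIdx.x, threadIdx.y) enumerate ids
  // and stride over K in steps of BlockDimY * GridDimX.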
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ASSERT_MSG(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ASSERT_MSG(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ASSERT_MSG(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ASSERT_MSG(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
class LookupTableCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.Inputs("Ids").front();
auto out_name = context.Outputs("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
auto *ids = ids_t->data<int64_t>();
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
dim3 threads(128, 8);
dim3 grids(8, 1);
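    // Fixed-size launch (8 blocks of 128x8 threads); the kernel loops internally, so any K and D are covered.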
if (padding_idx == -1)
hipLaunchKernelGGL(( LookupTable<
T, 128, 8, 8,
false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
else
hipLaunchKernelGGL(( LookupTable<
T, 128, 8, 8,
true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
}
};
template <typename T>
class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
PADDLE_ENFORCE_EQ(
d_table_value->dims(),
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t *ids = ids_t->data<int64_t>();
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
dim3 threads(128, 8);
dim3 grids(8, 1);
hipLaunchKernelGGL(( LookupTableGrad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids, N, K, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>,
ops::LookupTableCUDAKernel<double>,
ops::LookupTableCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(lookup_table_grad,
ops::LookupTableGradCUDAKernel<float>,
ops::LookupTableGradCUDAKernel<double>,
ops::LookupTableGradCUDAKernel<plat::float16>);
|
9bc76ed7a0d864995266993d002a2141c0eb9c78.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTable(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ASSERT_MSG(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ASSERT_MSG(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ASSERT_MSG(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ASSERT_MSG(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
class LookupTableCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.Inputs("Ids").front();
auto out_name = context.Outputs("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
auto *ids = ids_t->data<int64_t>();
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
dim3 threads(128, 8);
dim3 grids(8, 1);
if (padding_idx == -1)
LookupTable<
T, 128, 8, 8,
false><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
else
LookupTable<
T, 128, 8, 8,
true><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
}
};
template <typename T>
class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
PADDLE_ENFORCE_EQ(
d_table_value->dims(),
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t *ids = ids_t->data<int64_t>();
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
dim3 threads(128, 8);
dim3 grids(8, 1);
LookupTableGrad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>,
ops::LookupTableCUDAKernel<double>,
ops::LookupTableCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(lookup_table_grad,
ops::LookupTableGradCUDAKernel<float>,
ops::LookupTableGradCUDAKernel<double>,
ops::LookupTableGradCUDAKernel<plat::float16>);
|
5a6d77bcc65111bcdfe12916d58c62dd29a74d34.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "add_1d.h"
#include "util.h"
#include <stdio.h>
__global__ void add_1d(float * A, float * B, float * C, int size) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < size) {
C[idx] = A[idx] + B[idx];
}
}
void c_add_1d(float * A, float * B, float * C, int size) {
dim3 block(32, 1);
dim3 grid(32, 1);
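	// assume_1d_thread_grid (from util.h) presumably resizes grid/block so that grid.x * block.x covers size;
	// its definition is not shown here, so this is an assumption.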
assume_1d_thread_grid(&grid, &block, size);
printf("use grid(%d,%d) block(%d, %d)\n", grid.x, grid.y, block.x, block.y);
hipLaunchKernelGGL(( add_1d), dim3(grid), dim3(block), 0, 0, A, B, C, size);
}
|
5a6d77bcc65111bcdfe12916d58c62dd29a74d34.cu
|
#include <cuda_runtime.h>
#include "add_1d.h"
#include "util.h"
#include <stdio.h>
__global__ void add_1d(float * A, float * B, float * C, int size) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < size) {
C[idx] = A[idx] + B[idx];
}
}
void c_add_1d(float * A, float * B, float * C, int size) {
dim3 block(32, 1);
dim3 grid(32, 1);
assume_1d_thread_grid(&grid, &block, size);
printf("use grid(%d,%d) block(%d, %d)\n", grid.x, grid.y, block.x, block.y);
add_1d<<<grid, block>>>(A, B, C, size);
}
|
0e9f521992fb03bdf6c01b351fd7acb2d4c5f061.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ATen/NativeFunctions.h"
#include "ATen/Dispatch.h"
#include "ATen/hip/HIPApplyUtils.cuh"
namespace {
template <typename scalar_t>
void where_cuda(
at::Tensor& ret,
const at::Tensor& condition,
const at::Tensor& self,
const at::Tensor& other) {
// Yes this name is repetitive, but the CPU version is called
// CPU_tensor_apply4 and we don't have a CPU namespace or directory.
at::cuda::CUDA_tensor_apply4<scalar_t, uint8_t, scalar_t, scalar_t>(
ret,
condition,
self,
other,
[] __device__(
scalar_t & ret_val,
const uint8_t& cond_val,
const scalar_t& self_val,
const scalar_t& other_val) {
ret_val = cond_val ? self_val : other_val;
});
}
} // namespace
namespace at { namespace native {
Tensor _s_where_cuda(
const Tensor& condition,
const Tensor& self,
const Tensor& other) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_ALL_TYPES_AND_HALF(ret.type(), "where", [&] {
where_cuda<scalar_t>(ret, condition, self, other);
});
return ret;
}
}} // namespace at::native
|
0e9f521992fb03bdf6c01b351fd7acb2d4c5f061.cu
|
#include "ATen/NativeFunctions.h"
#include "ATen/Dispatch.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
namespace {
template <typename scalar_t>
void where_cuda(
at::Tensor& ret,
const at::Tensor& condition,
const at::Tensor& self,
const at::Tensor& other) {
// Yes this name is repetitive, but the CPU version is called
// CPU_tensor_apply4 and we don't have a CPU namespace or directory.
at::cuda::CUDA_tensor_apply4<scalar_t, uint8_t, scalar_t, scalar_t>(
ret,
condition,
self,
other,
[] __device__(
scalar_t & ret_val,
const uint8_t& cond_val,
const scalar_t& self_val,
const scalar_t& other_val) {
ret_val = cond_val ? self_val : other_val;
});
}
} // namespace
namespace at { namespace native {
Tensor _s_where_cuda(
const Tensor& condition,
const Tensor& self,
const Tensor& other) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_ALL_TYPES_AND_HALF(ret.type(), "where", [&] {
where_cuda<scalar_t>(ret, condition, self, other);
});
return ret;
}
}} // namespace at::native
|
7143a60fbdcdfe5347cbe2a8926af5b6d65585f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
7143a60fbdcdfe5347cbe2a8926af5b6d65585f1.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
25add9b0c6aaf0e1cd646e8c6a730909ca5c521e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2014, Julian Straub <[email protected]>
* Licensed under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <nvidia/helper_cuda.h>
#include <cudaPcl/cudaSphereHelpers.h>
#define BLOCK_WIDTH 16
#define BLOCK_SIZE BLOCK_WIDTH*BLOCK_WIDTH
// step size of the normals
// for PointXYZI
#define X_STEP 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
// TODO: try to copy the points in the tangent space out of the memory
// and process them on CPU
__global__ void meanInTpS2(float *d_p, float *d_q, unsigned short *z,
float *mu_karch, int w, int h) //, float *N)
{
__shared__ float p[3*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
// forth row is number of associated points
__shared__ float mu[BLOCK_SIZE*4*6];
//__shared__ float Ni[BLOCK_SIZE*6];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
// caching
if(tid < 3*6) p[tid] = d_p[tid];
#pragma unroll
for(int s=0; s<6*4; ++s) {
// this is almost certainly bad ordering
mu[tid+BLOCK_SIZE*s] = 0.0f;
}
//#pragma unroll
// for(int s=0; s<6; ++s) {
// Ni[tid+BLOCK_SIZE*s] = 0.0f;
// }
__syncthreads(); // make sure that ys have been cached
for(uint32_t ix=0; ix<8; ++ix)
for(uint32_t iy=0; iy<4; ++iy)
{
int id = idx+ix*w/8 + (idy+iy*h/4)*w;
if (id<w*h)
{
uint16_t zi = z[id];
if(zi<6){ // if point is good
float q[3], x[3];
q[0] = d_q[id*X_STEP+X_OFFSET+0];
q[1] = d_q[id*X_STEP+X_OFFSET+1];
q[2] = d_q[id*X_STEP+X_OFFSET+2];
Log_p(p+zi*3,q,x);
// float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1]
// + q[2]*p[zi*3+2]));
// float theta = acosf(dot);
// float sinc;
// if(theta < 1.e-8)
// sinc = 1.0f;
// else
// sinc = theta/sinf(theta);
// float x[3];
// x[0] = (q[0]-p[zi*3+0]*dot)*sinc;
// x[1] = (q[1]-p[zi*3+1]*dot)*sinc;
// x[2] = (q[2]-p[zi*3+2]*dot)*sinc;
mu[tid+(zi*4+0)*BLOCK_SIZE] += x[0];
mu[tid+(zi*4+1)*BLOCK_SIZE] += x[1];
mu[tid+(zi*4+2)*BLOCK_SIZE] += x[2];
mu[tid+(zi*4+3)*BLOCK_SIZE] += 1.0f;
}
}
}
__syncthreads(); //sync the threads
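  // Shared-memory tree reduction over the block; it stops at stride 2, and the remaining
  // two partial sums are folded together in the atomicAdd below.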
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
#pragma unroll
for( int k=0; k<6*4; ++k) {
int tidk = k*BLOCK_SIZE+tid;
mu[tidk] += mu[tidk + s];
}
}
__syncthreads();
}
if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) {
atomicAdd(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]);
}
}
__global__ void meanInTpS2(float *d_p, float *d_q, unsigned short *z,
float* d_weights, float *mu_karch, int w, int h) //, float *N)
{
__shared__ float p[3*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
// fourth row is the number of associated points
__shared__ float mu[BLOCK_SIZE*4*6];
//__shared__ float Ni[BLOCK_SIZE*6];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
// caching
if(tid < 3*6) p[tid] = d_p[tid];
#pragma unroll
for(int s=0; s<6*4; ++s) {
// this is almost certainly bad ordering
mu[tid+BLOCK_SIZE*s] = 0.0f;
}
//#pragma unroll
// for(int s=0; s<6; ++s) {
// Ni[tid+BLOCK_SIZE*s] = 0.0f;
// }
__syncthreads(); // make sure that ys have been cached
for(uint32_t ix=0; ix<8; ++ix)
for(uint32_t iy=0; iy<4; ++iy)
{
int id = idx+ix*w/8 + (idy+iy*h/4)*w;
if (id<w*h)
{
uint16_t zi = z[id];
float wi = d_weights[id];
if(zi<6){ // if point is good
float q[3],x[3];
q[0] = d_q[id*X_STEP+X_OFFSET+0];
q[1] = d_q[id*X_STEP+X_OFFSET+1];
q[2] = d_q[id*X_STEP+X_OFFSET+2];
Log_p(p+zi*3,q,x);
// float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1]
// + q[2]*p[zi*3+2]));
// float theta = acosf(dot);
// float sinc;
// if(theta < 1.e-8)
// sinc = 1.0f;
// else
// sinc = theta/sinf(theta);
// float x[3];
// x[0] = (q[0]-p[zi*3+0]*dot)*sinc;
// x[1] = (q[1]-p[zi*3+1]*dot)*sinc;
// x[2] = (q[2]-p[zi*3+2]*dot)*sinc;
mu[tid+(zi*4+0)*BLOCK_SIZE] += wi*x[0];
mu[tid+(zi*4+1)*BLOCK_SIZE] += wi*x[1];
mu[tid+(zi*4+2)*BLOCK_SIZE] += wi*x[2];
mu[tid+(zi*4+3)*BLOCK_SIZE] += wi;
}
}
}
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
#pragma unroll
for( int k=0; k<6*4; ++k) {
int tidk = k*BLOCK_SIZE+tid;
mu[tidk] += mu[tidk + s];
}
}
__syncthreads();
}
if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) {
atomicAdd(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]);
}
}
extern "C" void meanInTpS2GPU(float *h_p, float *d_p, float *h_mu_karch,
float *d_mu_karch, float *d_q, uint16_t *d_z, float* d_weights ,int w, int h)
{
for(uint32_t i=0; i<4*6; ++i)
h_mu_karch[i] =0.0f;
checkCudaErrors(hipMemcpy(d_mu_karch, h_mu_karch, 6*4* sizeof(float),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_p, h_p, 6*3* sizeof(float),
hipMemcpyHostToDevice));
dim3 threads(BLOCK_WIDTH,BLOCK_WIDTH,1);
// this way for 640x480 there are no remainders
//dim3 blocks(w/128+(w%128>0?1:0), h/32+(h%32>0?1:0),1);
// this still seems to be fastest
dim3 blocks(w/128+(w%128>0?1:0), h/64+(h%64>0?1:0),1);
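  // The launch spans w/8 x h/4 threads in total; each thread processes 8x4 pixels strided
  // across the image (see the ix/iy loops in the kernel), so w/128 x h/64 blocks of 16x16 cover the frame.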
//printf("%d x %d",w/32+(w%32>0?1:0),h/16+(h%16>0?1:0));
if(d_weights == NULL)
hipLaunchKernelGGL(( meanInTpS2), dim3(blocks),dim3(threads), 0, 0, d_p,d_q, d_z, d_mu_karch,w,h);
else
hipLaunchKernelGGL(( meanInTpS2), dim3(blocks),dim3(threads), 0, 0, d_p,d_q, d_z, d_weights, d_mu_karch,w,h);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_mu_karch, d_mu_karch, 6*4*sizeof(float),
hipMemcpyDeviceToHost));
};
__global__ void sufficientStatisticsOnTpS2(
float *d_p, float *Rnorths,
float *d_q, unsigned short *z, int w, int h,
float *SSs
) //, float *N)
{
__shared__ float p[3*6];
// sufficient statistics for whole blocksize
// 2 (x in TpS @north) + 1 (count) + 4 (outer product in TpS @north)
// all fo that times 6 for the different axes
__shared__ float xSSs[BLOCK_SIZE*(2+1+4)*6];
__shared__ float sRnorths[6*6];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
// caching
if(tid < 3*6) p[tid] = d_p[tid];
if(3*6 <= tid && tid <3*6+6*6) sRnorths[tid-3*6] = Rnorths[tid-3*6];
#pragma unroll
for(int s=0; s<6*7; ++s) {
// this is almost certainly bad ordering
xSSs[tid+BLOCK_SIZE*s] = 0.0f;
}
//#pragma unroll
// for(int s=0; s<6; ++s) {
// Ni[tid+BLOCK_SIZE*s] = 0.0f;
// }
__syncthreads(); // make sure that ys have been cached
for(uint32_t ix=0; ix<8; ++ix)
for(uint32_t iy=0; iy<4; ++iy)
{
int id = idx+ix*w/8 + (idy+iy*h/4)*w;
if (id<w*h)
{
uint16_t zi = z[id];
if(zi<6){ // if point is good
// copy q into local memory
float q[3];
q[0] = d_q[id*X_STEP+X_OFFSET+0];
q[1] = d_q[id*X_STEP+X_OFFSET+1];
q[2] = d_q[id*X_STEP+X_OFFSET+2];
// transform to TpS^2
float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1]
+ q[2]*p[zi*3+2]));
float theta = acosf(dot);
float sinc;
if(theta < 1.e-8)
sinc = 1.0f;
else
sinc = theta/sinf(theta);
float x[3];
x[0] = (q[0]-p[zi*3+0]*dot)*sinc;
x[1] = (q[1]-p[zi*3+1]*dot)*sinc;
x[2] = (q[2]-p[zi*3+2]*dot)*sinc;
// rotate up to north pole
float xNorth[2];
xNorth[0] = sRnorths[zi*6+0]*x[0] + sRnorths[zi*6+1]*x[1]
+ sRnorths[zi*6+2]*x[2];
xNorth[1] = sRnorths[zi*6+3]*x[0] + sRnorths[zi*6+4]*x[1]
+ sRnorths[zi*6+5]*x[2];
// input sufficient statistics
xSSs[tid+(zi*7+0)*BLOCK_SIZE] += xNorth[0];
xSSs[tid+(zi*7+1)*BLOCK_SIZE] += xNorth[1];
xSSs[tid+(zi*7+2)*BLOCK_SIZE] += xNorth[0]*xNorth[0];
xSSs[tid+(zi*7+3)*BLOCK_SIZE] += xNorth[1]*xNorth[0];
xSSs[tid+(zi*7+4)*BLOCK_SIZE] += xNorth[0]*xNorth[1];
xSSs[tid+(zi*7+5)*BLOCK_SIZE] += xNorth[1]*xNorth[1];
xSSs[tid+(zi*7+6)*BLOCK_SIZE] += 1.0f;
}
}
}
// old reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
#pragma unroll
for( int k=0; k<6*7; ++k) {
int tidk = k*BLOCK_SIZE+tid;
xSSs[tidk] += xSSs[tidk + s];
}
}
__syncthreads();
}
if(tid < 6*7) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) {
atomicAdd(&SSs[tid],xSSs[tid*BLOCK_SIZE]+xSSs[tid*BLOCK_SIZE+1]);
}
}
extern "C" void sufficientStatisticsOnTpS2GPU(float *h_p, float *d_p,
float *h_Rnorths, float *d_Rnorths, float *d_q, uint16_t *d_z ,int w, int h,
float *h_SSs, float *d_SSs)
{
for(uint32_t i=0; i<7*6; ++i)
h_SSs[i] =0.0f;
checkCudaErrors(hipMemcpy(d_SSs, h_SSs, 6*7* sizeof(float),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_p, h_p, 6*3* sizeof(float),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Rnorths, h_Rnorths, 6*6* sizeof(float),
hipMemcpyHostToDevice));
dim3 threads(16,16,1);
dim3 blocks(w/128+(w%128>0?1:0), h/64+(h%64>0?1:0),1);
hipLaunchKernelGGL(( sufficientStatisticsOnTpS2), dim3(blocks),dim3(threads), 0, 0, d_p,d_Rnorths,
d_q, d_z,w,h,d_SSs);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_SSs, d_SSs, 6*7*sizeof(float),
hipMemcpyDeviceToHost));
};
|
25add9b0c6aaf0e1cd646e8c6a730909ca5c521e.cu
|
/* Copyright (c) 2014, Julian Straub <[email protected]>
* Licensed under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <nvidia/helper_cuda.h>
#include <cudaPcl/cudaSphereHelpers.h>
#define BLOCK_WIDTH 16
#define BLOCK_SIZE BLOCK_WIDTH*BLOCK_WIDTH
// step size of the normals
// for PointXYZI
#define X_STEP 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
// TODO: try to copy the points in the tangent space out of the memory
// and process them on CPU
__global__ void meanInTpS2(float *d_p, float *d_q, unsigned short *z,
float *mu_karch, int w, int h) //, float *N)
{
__shared__ float p[3*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
// fourth row is the number of associated points
__shared__ float mu[BLOCK_SIZE*4*6];
//__shared__ float Ni[BLOCK_SIZE*6];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
// caching
if(tid < 3*6) p[tid] = d_p[tid];
#pragma unroll
for(int s=0; s<6*4; ++s) {
// this is almost certainly bad ordering
mu[tid+BLOCK_SIZE*s] = 0.0f;
}
//#pragma unroll
// for(int s=0; s<6; ++s) {
// Ni[tid+BLOCK_SIZE*s] = 0.0f;
// }
__syncthreads(); // make sure that ys have been cached
for(uint32_t ix=0; ix<8; ++ix)
for(uint32_t iy=0; iy<4; ++iy)
{
int id = idx+ix*w/8 + (idy+iy*h/4)*w;
if (id<w*h)
{
uint16_t zi = z[id];
if(zi<6){ // if point is good
float q[3], x[3];
q[0] = d_q[id*X_STEP+X_OFFSET+0];
q[1] = d_q[id*X_STEP+X_OFFSET+1];
q[2] = d_q[id*X_STEP+X_OFFSET+2];
Log_p(p+zi*3,q,x);
// float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1]
// + q[2]*p[zi*3+2]));
// float theta = acosf(dot);
// float sinc;
// if(theta < 1.e-8)
// sinc = 1.0f;
// else
// sinc = theta/sinf(theta);
// float x[3];
// x[0] = (q[0]-p[zi*3+0]*dot)*sinc;
// x[1] = (q[1]-p[zi*3+1]*dot)*sinc;
// x[2] = (q[2]-p[zi*3+2]*dot)*sinc;
mu[tid+(zi*4+0)*BLOCK_SIZE] += x[0];
mu[tid+(zi*4+1)*BLOCK_SIZE] += x[1];
mu[tid+(zi*4+2)*BLOCK_SIZE] += x[2];
mu[tid+(zi*4+3)*BLOCK_SIZE] += 1.0f;
}
}
}
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
#pragma unroll
for( int k=0; k<6*4; ++k) {
int tidk = k*BLOCK_SIZE+tid;
mu[tidk] += mu[tidk + s];
}
}
__syncthreads();
}
if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) {
atomicAdd(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]);
}
}
__global__ void meanInTpS2(float *d_p, float *d_q, unsigned short *z,
float* d_weights, float *mu_karch, int w, int h) //, float *N)
{
__shared__ float p[3*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
// fourth row is the number of associated points
__shared__ float mu[BLOCK_SIZE*4*6];
//__shared__ float Ni[BLOCK_SIZE*6];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
// caching
if(tid < 3*6) p[tid] = d_p[tid];
#pragma unroll
for(int s=0; s<6*4; ++s) {
// this is almost certainly bad ordering
mu[tid+BLOCK_SIZE*s] = 0.0f;
}
//#pragma unroll
// for(int s=0; s<6; ++s) {
// Ni[tid+BLOCK_SIZE*s] = 0.0f;
// }
__syncthreads(); // make sure that ys have been cached
for(uint32_t ix=0; ix<8; ++ix)
for(uint32_t iy=0; iy<4; ++iy)
{
int id = idx+ix*w/8 + (idy+iy*h/4)*w;
if (id<w*h)
{
uint16_t zi = z[id];
float wi = d_weights[id];
if(zi<6){ // if point is good
float q[3],x[3];
q[0] = d_q[id*X_STEP+X_OFFSET+0];
q[1] = d_q[id*X_STEP+X_OFFSET+1];
q[2] = d_q[id*X_STEP+X_OFFSET+2];
Log_p(p+zi*3,q,x);
// float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1]
// + q[2]*p[zi*3+2]));
// float theta = acosf(dot);
// float sinc;
// if(theta < 1.e-8)
// sinc = 1.0f;
// else
// sinc = theta/sinf(theta);
// float x[3];
// x[0] = (q[0]-p[zi*3+0]*dot)*sinc;
// x[1] = (q[1]-p[zi*3+1]*dot)*sinc;
// x[2] = (q[2]-p[zi*3+2]*dot)*sinc;
mu[tid+(zi*4+0)*BLOCK_SIZE] += wi*x[0];
mu[tid+(zi*4+1)*BLOCK_SIZE] += wi*x[1];
mu[tid+(zi*4+2)*BLOCK_SIZE] += wi*x[2];
mu[tid+(zi*4+3)*BLOCK_SIZE] += wi;
}
}
}
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
#pragma unroll
for( int k=0; k<6*4; ++k) {
int tidk = k*BLOCK_SIZE+tid;
mu[tidk] += mu[tidk + s];
}
}
__syncthreads();
}
if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) {
atomicAdd(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]);
}
}
extern "C" void meanInTpS2GPU(float *h_p, float *d_p, float *h_mu_karch,
float *d_mu_karch, float *d_q, uint16_t *d_z, float* d_weights ,int w, int h)
{
for(uint32_t i=0; i<4*6; ++i)
h_mu_karch[i] =0.0f;
checkCudaErrors(cudaMemcpy(d_mu_karch, h_mu_karch, 6*4* sizeof(float),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_p, h_p, 6*3* sizeof(float),
cudaMemcpyHostToDevice));
dim3 threads(BLOCK_WIDTH,BLOCK_WIDTH,1);
// this way for 640x480 there are no remainders
//dim3 blocks(w/128+(w%128>0?1:0), h/32+(h%32>0?1:0),1);
// this still seems to be fastest
dim3 blocks(w/128+(w%128>0?1:0), h/64+(h%64>0?1:0),1);
//printf("%d x %d",w/32+(w%32>0?1:0),h/16+(h%16>0?1:0));
if(d_weights == NULL)
meanInTpS2<<<blocks,threads>>>(d_p,d_q, d_z, d_mu_karch,w,h);
else
meanInTpS2<<<blocks,threads>>>(d_p,d_q, d_z, d_weights, d_mu_karch,w,h);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_mu_karch, d_mu_karch, 6*4*sizeof(float),
cudaMemcpyDeviceToHost));
};
__global__ void sufficientStatisticsOnTpS2(
float *d_p, float *Rnorths,
float *d_q, unsigned short *z, int w, int h,
float *SSs
) //, float *N)
{
__shared__ float p[3*6];
// sufficient statistics for whole blocksize
// 2 (x in TpS @north) + 1 (count) + 4 (outer product in TpS @north)
// all of that times 6 for the different axes
__shared__ float xSSs[BLOCK_SIZE*(2+1+4)*6];
__shared__ float sRnorths[6*6];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
// caching
if(tid < 3*6) p[tid] = d_p[tid];
if(3*6 <= tid && tid <3*6+6*6) sRnorths[tid-3*6] = Rnorths[tid-3*6];
#pragma unroll
for(int s=0; s<6*7; ++s) {
// this is almost certainly bad ordering
xSSs[tid+BLOCK_SIZE*s] = 0.0f;
}
//#pragma unroll
// for(int s=0; s<6; ++s) {
// Ni[tid+BLOCK_SIZE*s] = 0.0f;
// }
__syncthreads(); // make sure that ys have been cached
for(uint32_t ix=0; ix<8; ++ix)
for(uint32_t iy=0; iy<4; ++iy)
{
int id = idx+ix*w/8 + (idy+iy*h/4)*w;
if (id<w*h)
{
uint16_t zi = z[id];
if(zi<6){ // if point is good
// copy q into local memory
float q[3];
q[0] = d_q[id*X_STEP+X_OFFSET+0];
q[1] = d_q[id*X_STEP+X_OFFSET+1];
q[2] = d_q[id*X_STEP+X_OFFSET+2];
// transform to TpS^2
float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1]
+ q[2]*p[zi*3+2]));
float theta = acosf(dot);
float sinc;
if(theta < 1.e-8)
sinc = 1.0f;
else
sinc = theta/sinf(theta);
float x[3];
x[0] = (q[0]-p[zi*3+0]*dot)*sinc;
x[1] = (q[1]-p[zi*3+1]*dot)*sinc;
x[2] = (q[2]-p[zi*3+2]*dot)*sinc;
// rotate up to north pole
float xNorth[2];
xNorth[0] = sRnorths[zi*6+0]*x[0] + sRnorths[zi*6+1]*x[1]
+ sRnorths[zi*6+2]*x[2];
xNorth[1] = sRnorths[zi*6+3]*x[0] + sRnorths[zi*6+4]*x[1]
+ sRnorths[zi*6+5]*x[2];
// input sufficient statistics
xSSs[tid+(zi*7+0)*BLOCK_SIZE] += xNorth[0];
xSSs[tid+(zi*7+1)*BLOCK_SIZE] += xNorth[1];
xSSs[tid+(zi*7+2)*BLOCK_SIZE] += xNorth[0]*xNorth[0];
xSSs[tid+(zi*7+3)*BLOCK_SIZE] += xNorth[1]*xNorth[0];
xSSs[tid+(zi*7+4)*BLOCK_SIZE] += xNorth[0]*xNorth[1];
xSSs[tid+(zi*7+5)*BLOCK_SIZE] += xNorth[1]*xNorth[1];
xSSs[tid+(zi*7+6)*BLOCK_SIZE] += 1.0f;
}
}
}
// old reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
#pragma unroll
for( int k=0; k<6*7; ++k) {
int tidk = k*BLOCK_SIZE+tid;
xSSs[tidk] += xSSs[tidk + s];
}
}
__syncthreads();
}
if(tid < 6*7) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) {
atomicAdd(&SSs[tid],xSSs[tid*BLOCK_SIZE]+xSSs[tid*BLOCK_SIZE+1]);
}
}
extern "C" void sufficientStatisticsOnTpS2GPU(float *h_p, float *d_p,
float *h_Rnorths, float *d_Rnorths, float *d_q, uint16_t *d_z ,int w, int h,
float *h_SSs, float *d_SSs)
{
for(uint32_t i=0; i<7*6; ++i)
h_SSs[i] =0.0f;
checkCudaErrors(cudaMemcpy(d_SSs, h_SSs, 6*7* sizeof(float),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_p, h_p, 6*3* sizeof(float),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Rnorths, h_Rnorths, 6*6* sizeof(float),
cudaMemcpyHostToDevice));
dim3 threads(16,16,1);
dim3 blocks(w/128+(w%128>0?1:0), h/64+(h%64>0?1:0),1);
sufficientStatisticsOnTpS2<<<blocks,threads>>>(d_p,d_Rnorths,
d_q, d_z,w,h,d_SSs);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_SSs, d_SSs, 6*7*sizeof(float),
cudaMemcpyDeviceToHost));
};
|
1530b12983dcc775159147323f3387a39952b4fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
Dtype rms_decay, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
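    // RMSProp: keep a decaying average of squared gradients in h, then normalize g by its root (plus delta for stability).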
float gi = g[i];
float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
g[i] = local_rate * g[i] / (sqrt(hi) + delta);
}
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
Dtype delta, Dtype local_rate) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(RMSPropUpdate<Dtype>, dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, g, h, rms_decay, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
double);
} // namespace caffe
|
1530b12983dcc775159147323f3387a39952b4fb.cu
|
/*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
Dtype rms_decay, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
g[i] = local_rate * g[i] / (sqrt(hi) + delta);
}
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
Dtype delta, Dtype local_rate) {
RMSPropUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, rms_decay, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
double);
} // namespace caffe
|
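/*
 * Illustrative sketch (not part of either file above): a minimal host-side
 * reference for the per-element RMSProp update the kernels implement, handy
 * for checking a GPU result. The function name, demo values and main() are
 * assumptions made for this sketch, not Caffe APIs.
 */
#include <cmath>
#include <cstdio>

// Same rule as RMSPropUpdate<Dtype> above:
//   h <- rms_decay * h + (1 - rms_decay) * g * g
//   g <- local_rate * g / (sqrt(h) + delta)
static void rmsprop_update_cpu(int N, float* g, float* h,
                               float rms_decay, float delta, float local_rate) {
  for (int i = 0; i < N; ++i) {
    const float gi = g[i];
    const float hi = h[i] = rms_decay * h[i] + (1.f - rms_decay) * gi * gi;
    g[i] = local_rate * gi / (std::sqrt(hi) + delta);
  }
}

int main() {
  float g[3] = {0.1f, -0.5f, 2.0f};
  float h[3] = {0.f, 0.f, 0.f};
  rmsprop_update_cpu(3, g, h, 0.99f, 1e-8f, 0.01f);
  for (int i = 0; i < 3; ++i)
    std::printf("g[%d] = %g  h[%d] = %g\n", i, g[i], i, h[i]);
  return 0;
}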
1e88d120b08e65e3919a8a6d3a95f2bf114fd436.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2019) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include "fill.hpp"
#include <mdspan/mdspan.hpp>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include <iostream>
//================================================================================
static constexpr int global_delta = 1;
static constexpr int global_repeat = 16;
//================================================================================
using size_type = int;
template <class T, size_t... Es>
using lmdspan = Kokkos::mdspan<T, Kokkos::extents<size_type, Es...>, Kokkos::layout_left>;
template <class T, size_t... Es>
using rmdspan = Kokkos::mdspan<T, Kokkos::extents<size_type, Es...>, Kokkos::layout_right>;
void throw_runtime_exception(const std::string &msg) {
std::ostringstream o;
o << msg;
throw std::runtime_error(o.str());
}
void cuda_internal_error_throw(hipError_t e, const char* name,
const char* file = NULL, const int line = 0) {
std::ostringstream out;
out << name << " error( " << hipGetErrorName(e)
<< "): " << hipGetErrorString(e);
if (file) {
out << " " << file << ":" << line;
}
throw_runtime_exception(out.str());
}
inline void cuda_internal_safe_call(hipError_t e, const char* name,
const char* file = NULL,
const int line = 0) {
if (hipSuccess != e) {
cuda_internal_error_throw(e, name, file, line);
}
}
#define CUDA_SAFE_CALL(call) \
cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
//================================================================================
template <class F, class... Args>
__global__
void do_run_kernel(F f, Args... args) {
f(args...);
}
template <class F, class... Args>
float run_kernel_timed(size_t N, size_t M, F&& f, Args&&... args) {
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
CUDA_SAFE_CALL(hipEventRecord(start));
hipLaunchKernelGGL(( do_run_kernel), dim3((N+255)/256),dim3(256), 0, 0,
(F&&)f, ((Args&&) args)...
);
CUDA_SAFE_CALL(hipEventRecord(stop));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float milliseconds = 0;
CUDA_SAFE_CALL(hipEventElapsedTime(&milliseconds, start, stop));
return milliseconds;
}
template <class MDSpan, class... DynSizes>
MDSpan fill_device_mdspan(MDSpan, DynSizes... dyn) {
using value_type = typename MDSpan::value_type;
auto buffer_size = MDSpan{nullptr, dyn...}.mapping().required_span_size();
auto host_buffer = std::make_unique<value_type[]>(
MDSpan{nullptr, dyn...}.mapping().required_span_size()
);
auto host_mdspan = MDSpan{host_buffer.get(), dyn...};
mdspan_benchmark::fill_random(host_mdspan);
value_type* device_buffer = nullptr;
CUDA_SAFE_CALL(hipMalloc(&device_buffer, buffer_size * sizeof(value_type)));
CUDA_SAFE_CALL(hipMemcpy(
device_buffer, host_buffer.get(), buffer_size * sizeof(value_type), hipMemcpyHostToDevice
));
return MDSpan{device_buffer, dyn...};
}
//================================================================================
template <class MDSpanMatrix, class... DynSizes>
void BM_MDSpan_CUDA_MatVec(benchmark::State& state, MDSpanMatrix, DynSizes... dyn) {
using value_type = typename MDSpanMatrix::value_type;
using MDSpanVector = lmdspan<value_type,Kokkos::dynamic_extent>;
auto A = fill_device_mdspan(MDSpanMatrix{}, dyn...);
auto x = fill_device_mdspan(MDSpanVector{}, A.extent(1));
auto y = fill_device_mdspan(MDSpanVector{}, A.extent(0));
auto lambda =
[=] __device__ {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=A.extent(0)) return;
value_type y_i = 0;
for(size_t j = 0; j < A.extent(1); j ++) {
y_i += A(i,j) * x(j);
}
y(i) = y_i;
};
run_kernel_timed(A.extent(0),A.extent(1),lambda);
for (auto _ : state) {
auto timed = run_kernel_timed(A.extent(0),A.extent(1),lambda);
// units of the CUDA timer are milliseconds; units of the iteration timer are seconds
state.SetIterationTime(timed * 1e-3);
}
size_t num_elements = 2 * A.extent(0) * A.extent(1) + 2 * A.extent(0);
state.SetBytesProcessed( num_elements * sizeof(value_type) * state.iterations() );
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipFree(A.data_handle()));
CUDA_SAFE_CALL(hipFree(x.data_handle()));
CUDA_SAFE_CALL(hipFree(y.data_handle()));
}
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec, left, lmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec, right, rmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
template <class MDSpanMatrix, class... DynSizes>
void BM_MDSpan_CUDA_MatVec_Raw_Right(benchmark::State& state, MDSpanMatrix, DynSizes... dyn) {
using value_type = typename MDSpanMatrix::value_type;
using MDSpanVector = lmdspan<value_type,Kokkos::dynamic_extent>;
auto A = fill_device_mdspan(MDSpanMatrix{}, dyn...);
auto x = fill_device_mdspan(MDSpanVector{}, A.extent(1));
auto y = fill_device_mdspan(MDSpanVector{}, A.extent(0));
size_t N = A.extent(0);
size_t M = A.extent(1);
value_type* p_A = A.data_handle();
value_type* p_x = x.data_handle();
value_type* p_y = y.data_handle();
auto lambda =
[=] __device__ {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=N) return;
value_type y_i = 0;
for(size_t j = 0; j < M; j ++) {
y_i += p_A[i*M+j] * p_x[j];
}
p_y[i] = y_i;
};
run_kernel_timed(N,M,lambda);
for (auto _ : state) {
auto timed = run_kernel_timed(N,M,lambda);
// units of the CUDA timer are milliseconds; units of the iteration timer are seconds
state.SetIterationTime(timed * 1e-3);
}
size_t num_elements = 2 * A.extent(0) * A.extent(1) + 2 * A.extent(0);
state.SetBytesProcessed( num_elements * sizeof(value_type) * state.iterations() );
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipFree(A.data_handle()));
CUDA_SAFE_CALL(hipFree(x.data_handle()));
CUDA_SAFE_CALL(hipFree(y.data_handle()));
}
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec_Raw_Right, right, rmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
template <class MDSpanMatrix, class... DynSizes>
void BM_MDSpan_CUDA_MatVec_Raw_Left(benchmark::State& state, MDSpanMatrix, DynSizes... dyn) {
using value_type = typename MDSpanMatrix::value_type;
using MDSpanVector = lmdspan<value_type,Kokkos::dynamic_extent>;
auto A = fill_device_mdspan(MDSpanMatrix{}, dyn...);
auto x = fill_device_mdspan(MDSpanVector{}, A.extent(1));
auto y = fill_device_mdspan(MDSpanVector{}, A.extent(0));
size_t N = A.extent(0);
size_t M = A.extent(1);
value_type* p_A = A.data_handle();
value_type* p_x = x.data_handle();
value_type* p_y = y.data_handle();
auto lambda =
[=] __device__ {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=N) return;
value_type y_i = 0;
for(size_t j = 0; j < M; j ++) {
y_i += p_A[i+j*N] * p_x[j];
}
p_y[i] = y_i;
};
run_kernel_timed(N,M,lambda);
for (auto _ : state) {
auto timed = run_kernel_timed(N,M,lambda);
// units of the CUDA timer are milliseconds; units of the iteration timer are seconds
state.SetIterationTime(timed * 1e-3);
}
size_t num_elements = 2 * A.extent(0) * A.extent(1) + 2 * A.extent(0);
state.SetBytesProcessed( num_elements * sizeof(value_type) * state.iterations());
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipFree(A.data_handle()));
CUDA_SAFE_CALL(hipFree(x.data_handle()));
CUDA_SAFE_CALL(hipFree(y.data_handle()));
}
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec_Raw_Left, left, lmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
BENCHMARK_MAIN();
|
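/*
 * Illustrative sketch (not part of the benchmark above): the hipify output
 * above replaces CUDA's  kernel<<<grid, block, shmem, stream>>>(args...)
 * with  hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...).
 * A minimal self-contained example of that mapping; the kernel and buffer
 * names are assumptions made for this sketch.
 */
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale_kernel(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 256;
  float* d_x = nullptr;
  hipMalloc((void**)&d_x, n * sizeof(float));
  hipMemset(d_x, 0, n * sizeof(float));
  // CUDA form:  scale_kernel<<<(n + 127) / 128, 128>>>(d_x, 2.f, n);
  // HIP form (0 bytes of dynamic shared memory, default stream):
  hipLaunchKernelGGL(scale_kernel, dim3((n + 127) / 128), dim3(128), 0, 0, d_x, 2.f, n);
  hipDeviceSynchronize();
  std::printf("launch status: %s\n", hipGetErrorString(hipGetLastError()));
  hipFree(d_x);
  return 0;
}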
1e88d120b08e65e3919a8a6d3a95f2bf114fd436.cu
|
/*
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2019) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include "fill.hpp"
#include <mdspan/mdspan.hpp>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include <iostream>
//================================================================================
static constexpr int global_delta = 1;
static constexpr int global_repeat = 16;
//================================================================================
using size_type = int;
template <class T, size_t... Es>
using lmdspan = Kokkos::mdspan<T, Kokkos::extents<size_type, Es...>, Kokkos::layout_left>;
template <class T, size_t... Es>
using rmdspan = Kokkos::mdspan<T, Kokkos::extents<size_type, Es...>, Kokkos::layout_right>;
void throw_runtime_exception(const std::string &msg) {
std::ostringstream o;
o << msg;
throw std::runtime_error(o.str());
}
void cuda_internal_error_throw(cudaError e, const char* name,
const char* file = NULL, const int line = 0) {
std::ostringstream out;
out << name << " error( " << cudaGetErrorName(e)
<< "): " << cudaGetErrorString(e);
if (file) {
out << " " << file << ":" << line;
}
throw_runtime_exception(out.str());
}
inline void cuda_internal_safe_call(cudaError e, const char* name,
const char* file = NULL,
const int line = 0) {
if (cudaSuccess != e) {
cuda_internal_error_throw(e, name, file, line);
}
}
#define CUDA_SAFE_CALL(call) \
cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
//================================================================================
template <class F, class... Args>
__global__
void do_run_kernel(F f, Args... args) {
f(args...);
}
template <class F, class... Args>
float run_kernel_timed(size_t N, size_t M, F&& f, Args&&... args) {
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
CUDA_SAFE_CALL(cudaEventRecord(start));
do_run_kernel<<<(N+255)/256,256>>>(
(F&&)f, ((Args&&) args)...
);
CUDA_SAFE_CALL(cudaEventRecord(stop));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float milliseconds = 0;
CUDA_SAFE_CALL(cudaEventElapsedTime(&milliseconds, start, stop));
return milliseconds;
}
template <class MDSpan, class... DynSizes>
MDSpan fill_device_mdspan(MDSpan, DynSizes... dyn) {
using value_type = typename MDSpan::value_type;
auto buffer_size = MDSpan{nullptr, dyn...}.mapping().required_span_size();
auto host_buffer = std::make_unique<value_type[]>(
MDSpan{nullptr, dyn...}.mapping().required_span_size()
);
auto host_mdspan = MDSpan{host_buffer.get(), dyn...};
mdspan_benchmark::fill_random(host_mdspan);
value_type* device_buffer = nullptr;
CUDA_SAFE_CALL(cudaMalloc(&device_buffer, buffer_size * sizeof(value_type)));
CUDA_SAFE_CALL(cudaMemcpy(
device_buffer, host_buffer.get(), buffer_size * sizeof(value_type), cudaMemcpyHostToDevice
));
return MDSpan{device_buffer, dyn...};
}
//================================================================================
template <class MDSpanMatrix, class... DynSizes>
void BM_MDSpan_CUDA_MatVec(benchmark::State& state, MDSpanMatrix, DynSizes... dyn) {
using value_type = typename MDSpanMatrix::value_type;
using MDSpanVector = lmdspan<value_type,Kokkos::dynamic_extent>;
auto A = fill_device_mdspan(MDSpanMatrix{}, dyn...);
auto x = fill_device_mdspan(MDSpanVector{}, A.extent(1));
auto y = fill_device_mdspan(MDSpanVector{}, A.extent(0));
auto lambda =
[=] __device__ {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=A.extent(0)) return;
value_type y_i = 0;
for(size_t j = 0; j < A.extent(1); j ++) {
y_i += A(i,j) * x(j);
}
y(i) = y_i;
};
run_kernel_timed(A.extent(0),A.extent(1),lambda);
for (auto _ : state) {
auto timed = run_kernel_timed(A.extent(0),A.extent(1),lambda);
// units of the CUDA timer are milliseconds; units of the iteration timer are seconds
state.SetIterationTime(timed * 1e-3);
}
size_t num_elements = 2 * A.extent(0) * A.extent(1) + 2 * A.extent(0);
state.SetBytesProcessed( num_elements * sizeof(value_type) * state.iterations() );
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaFree(A.data_handle()));
CUDA_SAFE_CALL(cudaFree(x.data_handle()));
CUDA_SAFE_CALL(cudaFree(y.data_handle()));
}
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec, left, lmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec, right, rmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
template <class MDSpanMatrix, class... DynSizes>
void BM_MDSpan_CUDA_MatVec_Raw_Right(benchmark::State& state, MDSpanMatrix, DynSizes... dyn) {
using value_type = typename MDSpanMatrix::value_type;
using MDSpanVector = lmdspan<value_type,Kokkos::dynamic_extent>;
auto A = fill_device_mdspan(MDSpanMatrix{}, dyn...);
auto x = fill_device_mdspan(MDSpanVector{}, A.extent(1));
auto y = fill_device_mdspan(MDSpanVector{}, A.extent(0));
size_t N = A.extent(0);
size_t M = A.extent(1);
value_type* p_A = A.data_handle();
value_type* p_x = x.data_handle();
value_type* p_y = y.data_handle();
auto lambda =
[=] __device__ {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=N) return;
value_type y_i = 0;
for(size_t j = 0; j < M; j ++) {
y_i += p_A[i*M+j] * p_x[j];
}
p_y[i] = y_i;
};
run_kernel_timed(N,M,lambda);
for (auto _ : state) {
auto timed = run_kernel_timed(N,M,lambda);
// units of the CUDA timer are milliseconds; units of the iteration timer are seconds
state.SetIterationTime(timed * 1e-3);
}
size_t num_elements = 2 * A.extent(0) * A.extent(1) + 2 * A.extent(0);
state.SetBytesProcessed( num_elements * sizeof(value_type) * state.iterations() );
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaFree(A.data_handle()));
CUDA_SAFE_CALL(cudaFree(x.data_handle()));
CUDA_SAFE_CALL(cudaFree(y.data_handle()));
}
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec_Raw_Right, right, rmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
template <class MDSpanMatrix, class... DynSizes>
void BM_MDSpan_CUDA_MatVec_Raw_Left(benchmark::State& state, MDSpanMatrix, DynSizes... dyn) {
using value_type = typename MDSpanMatrix::value_type;
using MDSpanVector = lmdspan<value_type,Kokkos::dynamic_extent>;
auto A = fill_device_mdspan(MDSpanMatrix{}, dyn...);
auto x = fill_device_mdspan(MDSpanVector{}, A.extent(1));
auto y = fill_device_mdspan(MDSpanVector{}, A.extent(0));
size_t N = A.extent(0);
size_t M = A.extent(1);
value_type* p_A = A.data_handle();
value_type* p_x = x.data_handle();
value_type* p_y = y.data_handle();
auto lambda =
[=] __device__ {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=N) return;
value_type y_i = 0;
for(size_t j = 0; j < M; j ++) {
y_i += p_A[i+j*N] * p_x[j];
}
p_y[i] = y_i;
};
run_kernel_timed(N,M,lambda);
for (auto _ : state) {
auto timed = run_kernel_timed(N,M,lambda);
// units of the CUDA timer are milliseconds; units of the iteration timer are seconds
state.SetIterationTime(timed * 1e-3);
}
size_t num_elements = 2 * A.extent(0) * A.extent(1) + 2 * A.extent(0);
state.SetBytesProcessed( num_elements * sizeof(value_type) * state.iterations());
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaFree(A.data_handle()));
CUDA_SAFE_CALL(cudaFree(x.data_handle()));
CUDA_SAFE_CALL(cudaFree(y.data_handle()));
}
BENCHMARK_CAPTURE(BM_MDSpan_CUDA_MatVec_Raw_Left, left, lmdspan<double,Kokkos::dynamic_extent,Kokkos::dynamic_extent>(), 100000, 5000);
BENCHMARK_MAIN();
|
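/*
 * Illustrative sketch (not part of the benchmark above): the two "Raw"
 * kernels differ only in index arithmetic -- layout_right (row-major) reads
 * A[i*M + j], layout_left (column-major) reads A[i + j*N]. A tiny host-only
 * check that both index schemes give the same y = A*x; the data below is
 * made up for the example.
 */
#include <cstdio>
#include <vector>

int main() {
  const int N = 3, M = 2;                 // 3x2 matrix
  std::vector<double> A_rm = {1, 2,       // row-major storage
                              3, 4,
                              5, 6};
  std::vector<double> A_cm = {1, 3, 5,    // same matrix, column-major storage
                              2, 4, 6};
  std::vector<double> x = {10, 100};
  for (int i = 0; i < N; ++i) {
    double y_rm = 0, y_cm = 0;
    for (int j = 0; j < M; ++j) {
      y_rm += A_rm[i * M + j] * x[j];     // layout_right indexing
      y_cm += A_cm[i + j * N] * x[j];     // layout_left indexing
    }
    std::printf("row %d: row-major %g, column-major %g\n", i, y_rm, y_cm);
  }
  return 0;
}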
2bc57ec213798dec8c25de10020ac7ce2f6b2b0f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file
* \brief Interface implementation for GPU solvers to be called as a library
*
* \author Nicholas Curtis
* \date 03/09/2015
*
* Contains initialization, integration and cleanup functions
*/
#include "solver_interface.cuh"
#ifdef GENERATE_DOCS
namespace genericcu {
#endif
//! Padded # of ODEs to solve
int padded;
//! The solver memory structs
solver_memory* host_solver, *device_solver;
//! The mechanism memory structs
mechanism_memory* host_mech, *device_mech;
//! block and grid sizes
dim3 dimBlock, dimGrid;
//! result flag
int* result_flag;
//! temporary storage
double* y_temp;
/**
* \brief A convenience method to copy memory between host pointers of different pitches, widths and heights.
* Enables easier use of HIP's hipMemcpy2D functions.
*
* \param[out] dst The destination array
* \param[in] pitch_dst The width (in bytes) of the destination array.
This corresponds to the padded number of IVPs to be solved.
* \param[in] src The source pointer
* \param[in] pitch_src The width (in bytes) of the source array.
This corresponds to the (non-padded) number of IVPs read by read_initial_conditions
* \param[in] offset The offset within the source array (IVP index) to copy from.
This is useful in the case (for large models) where the solver and state vector memory will not fit in device memory
and the integration must be split into multiple kernel calls.
* \param[in] width The size (in bytes) of memory to copy for each entry in the state vector
* \param[in] height The number of entries in the state vector
*/
inline void memcpy2D_in(double* dst, const int pitch_dst, double const * src, const int pitch_src,
const int offset, const size_t width, const int height) {
for (int i = 0; i < height; ++i)
{
memcpy(dst, &src[offset], width);
dst += pitch_dst;
src += pitch_src;
}
}
/**
* \brief A convenience method to copy memory between host pointers of different pitches, widths and heights.
* Enables easier use of HIP's hipMemcpy2D functions.
*
* \param[out] dst The destination array
* \param[in] pitch_dst The width (in bytes) of the source array.
This corresponds to the (non-padded) number of IVPs read by read_initial_conditions
* \param[in] src The source pointer
* \param[in] pitch_src The width (in bytes) of the destination array.
This corresponds to the padded number of IVPs to be solved.
* \param[in] offset The offset within the destination array (IVP index) to copy to.
This is useful in the case (for large models) where the solver and state vector memory will not fit in device memory
and the integration must be split into multiple kernel calls.
* \param[in] width The size (in bytes) of memory to copy for each entry in the state vector
* \param[in] height The number of entries in the state vector
*/
inline void memcpy2D_out(double* dst, const int pitch_dst, double const * src, const int pitch_src,
const int offset, const size_t width, const int height) {
for (int i = 0; i < height; ++i)
{
memcpy(&dst[offset], src, width);
dst += pitch_dst;
src += pitch_src;
}
}
/**
* \brief Initializes the solver
* \param[in] NUM The number of ODEs to integrate
* \param[in] device The CUDA device number, if < 0 set to the first available GPU
*/
void accelerInt_initialize(int NUM, int device) {
device = device < 0 ? 0 : device;
// set & initialize device using command line argument (if any)
hipDeviceProp_t devProp;
// get number of devices
int num_devices;
hipGetDeviceCount(&num_devices);
if ((device >= 0) && (device < num_devices))
{
cudaErrorCheck( hipSetDevice (device) );
}
else
{
// not in range, error
printf("Error: GPU device number not in correct range\n");
printf("Provide number between 0 and %i\n", num_devices - 1);
exit(1);
}
cudaErrorCheck (hipGetDeviceProperties(&devProp, device));
// reset device
cudaErrorCheck( hipDeviceReset() );
cudaErrorCheck( hipPeekAtLastError() );
cudaErrorCheck( hipDeviceSynchronize() );
//bump up shared mem bank size
cudaErrorCheck(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte));
//and L1 size
cudaErrorCheck(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
//get the memory sizes
size_t size_per_thread = required_mechanism_size() + required_solver_size();
size_t free_mem = 0;
size_t total_mem = 0;
cudaErrorCheck( hipMemGetInfo (&free_mem, &total_mem) );
//conservatively estimate the maximum allowable threads
int max_threads = int(floor(0.8 * ((double)free_mem) / ((double)size_per_thread)));
padded = min(NUM, max_threads); // assign the file-scope `padded`; declaring a new local here would shadow it and leave the global at zero
//padded is next factor of block size up
padded = int(ceil(padded / float(TARGET_BLOCK_SIZE)) * TARGET_BLOCK_SIZE);
if (padded == 0)
{
printf("Mechanism is too large to fit into global CUDA memory... exiting.");
exit(-1);
}
//initialize memory
initialize_gpu_memory(padded, &host_mech, &device_mech);
initialize_solver(padded, &host_solver, &device_solver);
//grid sizes
dimBlock = dim3(TARGET_BLOCK_SIZE, 1);
dimGrid = dim3(padded / TARGET_BLOCK_SIZE, 1 );
//local storage
result_flag = (int*)malloc(padded * sizeof(int));
y_temp = (double*)malloc(padded * NSP * sizeof(double));
}
/**
* \brief integrate NUM odes from time `t_start` to time `t_end`, using stepsizes of `stepsize`
*
* \param[in] NUM The number of ODEs to integrate. This should be the size of the leading dimension of `y_host` and `var_host`. @see accelerint_indx
* \param[in] t_start The starting time
* \param[in] t_end The end time
* \param[in] stepsize The integration step size. If `stepsize` < 0, the step size will be set to `t_end - t`
* \param[in,out] y_host The state vectors to integrate.
* \param[in] var_host The parameters to use in dydt() and eval_jacob()
*
*/
void accelerInt_integrate(const int NUM, const double t_start, const double t_end, const double stepsize,
double * __restrict__ y_host, const double * __restrict__ var_host)
{
double step = stepsize < 0 ? t_end - t_start : stepsize;
double t = t_start;
double t_next = fmin(end_time, t + step);
int numSteps = 0;
// time integration loop
while (t + EPS < t_end)
{
numSteps++;
int num_solved = 0;
while (num_solved < NUM)
{
int num_cond = min(NUM - num_solved, padded);
cudaErrorCheck( hipMemcpy (host_mech->var, &var_host[num_solved],
num_cond * sizeof(double), hipMemcpyHostToDevice));
//copy our memory into y_temp
memcpy2D_in(y_temp, padded, y_host, NUM,
num_solved, num_cond * sizeof(double), NSP);
// transfer memory to GPU
cudaErrorCheck( hipMemcpy2D (host_mech->y, padded * sizeof(double),
y_temp, padded * sizeof(double),
num_cond * sizeof(double), NSP,
hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( intDriver) , dim3(dimGrid), dim3(dimBlock), SHARED_SIZE , 0, num_cond, t, t_next, host_mech->var, host_mech->y, device_mech, device_solver);
#ifdef DEBUG
cudaErrorCheck( hipPeekAtLastError() );
cudaErrorCheck( hipDeviceSynchronize() );
#endif
// copy the result flag back
cudaErrorCheck( hipMemcpy(result_flag, host_solver->result, num_cond * sizeof(int), hipMemcpyDeviceToHost) );
check_error(num_cond, result_flag);
// transfer memory back to CPU
cudaErrorCheck( hipMemcpy2D (y_temp, padded * sizeof(double),
host_mech->y, padded * sizeof(double),
num_cond * sizeof(double), NSP,
hipMemcpyDeviceToHost) );
memcpy2D_out(y_host, NUM, y_temp, padded,
num_solved, num_cond * sizeof(double), NSP);
num_solved += num_cond;
}
t = t_next;
t_next = fmin(t_end, (numSteps + 1) * step);
}
}
/**
* \brief Cleans up the solver
*/
void accelerInt_cleanup() {
free_gpu_memory(&host_mech, &device_mech);
cleanup_solver(&host_solver, &device_solver);
free(y_temp);
free(host_mech);
free(host_solver);
free(result_flag);
cudaErrorCheck( hipDeviceReset() );
}
#ifdef GENERATE_DOCS
}
#endif
|
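/*
 * Illustrative sketch (not part of the solver above): a host-only rendering
 * of the padding logic in accelerInt_initialize -- cap the number of
 * resident IVPs at ~80% of free device memory, then round up to the block
 * size. The sizes below are made up for the example.
 */
#include <cmath>
#include <cstdio>

int main() {
  const int TARGET_BLOCK_SIZE = 64;              // threads per block (assumed)
  const int NUM = 10000;                         // IVPs requested
  const size_t size_per_thread = 48 * 1024;      // bytes per IVP (assumed)
  const size_t free_mem = 256ull * 1024 * 1024;  // free device memory (assumed)

  int max_threads = int(std::floor(0.8 * double(free_mem) / double(size_per_thread)));
  int padded = NUM < max_threads ? NUM : max_threads;
  padded = int(std::ceil(padded / float(TARGET_BLOCK_SIZE)) * TARGET_BLOCK_SIZE);
  std::printf("max_threads = %d, padded = %d\n", max_threads, padded);
  return 0;
}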
2bc57ec213798dec8c25de10020ac7ce2f6b2b0f.cu
|
/**
* \file
* \brief Interface implementation for GPU solvers to be called as a library
*
* \author Nicholas Curtis
* \date 03/09/2015
*
* Contains initialization, integration and cleanup functions
*/
#include "solver_interface.cuh"
#ifdef GENERATE_DOCS
namespace genericcu {
#endif
//! Padded # of ODEs to solve
int padded;
//! The solver memory structs
solver_memory* host_solver, *device_solver;
//! The mechanism memory structs
mechanism_memory* host_mech, *device_mech;
//! block and grid sizes
dim3 dimBlock, dimGrid;
//! result flag
int* result_flag;
//! temporary storage
double* y_temp;
/**
* \brief A convenience method to copy memory between host pointers of different pitches, widths and heights.
* Enables easier use of CUDA's cudaMemcpy2D functions.
*
* \param[out] dst The destination array
* \param[in] pitch_dst The width (in bytes) of the destination array.
This corresponds to the padded number of IVPs to be solved.
* \param[in] src The source pointer
* \param[in] pitch_src The width (in bytes) of the source array.
This corresponds to the (non-padded) number of IVPs read by read_initial_conditions
* \param[in] offset The offset within the source array (IVP index) to copy from.
This is useful in the case (for large models) where the solver and state vector memory will not fit in device memory
and the integration must be split into multiple kernel calls.
* \param[in] width The size (in bytes) of memory to copy for each entry in the state vector
* \param[in] height The number of entries in the state vector
*/
inline void memcpy2D_in(double* dst, const int pitch_dst, double const * src, const int pitch_src,
const int offset, const size_t width, const int height) {
for (int i = 0; i < height; ++i)
{
memcpy(dst, &src[offset], width);
dst += pitch_dst;
src += pitch_src;
}
}
/**
* \brief A convenience method to copy memory between host pointers of different pitches, widths and heights.
* Enables easier use of CUDA's cudaMemcpy2D functions.
*
* \param[out] dst The destination array
* \param[in] pitch_dst The width (in bytes) of the source array.
This corresponds to the (non-padded) number of IVPs read by read_initial_conditions
* \param[in] src The source pointer
* \param[in] pitch_src The width (in bytes) of the destination array.
This corresponds to the padded number of IVPs to be solved.
* \param[in] offset The offset within the destination array (IVP index) to copy to.
This is useful in the case (for large models) where the solver and state vector memory will not fit in device memory
and the integration must be split into multiple kernel calls.
* \param[in] width The size (in bytes) of memory to copy for each entry in the state vector
* \param[in] height The number of entries in the state vector
*/
inline void memcpy2D_out(double* dst, const int pitch_dst, double const * src, const int pitch_src,
const int offset, const size_t width, const int height) {
for (int i = 0; i < height; ++i)
{
memcpy(&dst[offset], src, width);
dst += pitch_dst;
src += pitch_src;
}
}
/**
* \brief Initializes the solver
* \param[in] NUM The number of ODEs to integrate
* \param[in] device The CUDA device number, if < 0 set to the first available GPU
*/
void accelerInt_initialize(int NUM, int device) {
device = device < 0 ? 0 : device;
// set & initialize device using command line argument (if any)
cudaDeviceProp devProp;
// get number of devices
int num_devices;
cudaGetDeviceCount(&num_devices);
if ((device >= 0) && (device < num_devices))
{
cudaErrorCheck( cudaSetDevice (device) );
}
else
{
// not in range, error
printf("Error: GPU device number not in correct range\n");
printf("Provide number between 0 and %i\n", num_devices - 1);
exit(1);
}
cudaErrorCheck (cudaGetDeviceProperties(&devProp, device));
// reset device
cudaErrorCheck( cudaDeviceReset() );
cudaErrorCheck( cudaPeekAtLastError() );
cudaErrorCheck( cudaDeviceSynchronize() );
//bump up shared mem bank size
cudaErrorCheck(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
//and L1 size
cudaErrorCheck(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
//get the memory sizes
size_t size_per_thread = required_mechanism_size() + required_solver_size();
size_t free_mem = 0;
size_t total_mem = 0;
cudaErrorCheck( cudaMemGetInfo (&free_mem, &total_mem) );
//conservatively estimate the maximum allowable threads
int max_threads = int(floor(0.8 * ((double)free_mem) / ((double)size_per_thread)));
padded = min(NUM, max_threads); // assign the file-scope `padded`; declaring a new local here would shadow it and leave the global at zero
//padded is next factor of block size up
padded = int(ceil(padded / float(TARGET_BLOCK_SIZE)) * TARGET_BLOCK_SIZE);
if (padded == 0)
{
printf("Mechanism is too large to fit into global CUDA memory... exiting.");
exit(-1);
}
//initialize memory
initialize_gpu_memory(padded, &host_mech, &device_mech);
initialize_solver(padded, &host_solver, &device_solver);
//grid sizes
dimBlock = dim3(TARGET_BLOCK_SIZE, 1);
dimGrid = dim3(padded / TARGET_BLOCK_SIZE, 1 );
//local storage
result_flag = (int*)malloc(padded * sizeof(int));
y_temp = (double*)malloc(padded * NSP * sizeof(double));
}
/**
* \brief integrate NUM odes from time `t_start` to time `t_end`, using stepsizes of `stepsize`
*
* \param[in] NUM The number of ODEs to integrate. This should be the size of the leading dimension of `y_host` and `var_host`. @see accelerint_indx
* \param[in] t_start The starting time
* \param[in] t_end The end time
* \param[in] stepsize The integration step size. If `stepsize` < 0, the step size will be set to `t_end - t`
* \param[in,out] y_host The state vectors to integrate.
* \param[in] var_host The parameters to use in dydt() and eval_jacob()
*
*/
void accelerInt_integrate(const int NUM, const double t_start, const double t_end, const double stepsize,
double * __restrict__ y_host, const double * __restrict__ var_host)
{
double step = stepsize < 0 ? t_end - t_start : stepsize;
double t = t_start;
double t_next = fmin(end_time, t + step);
int numSteps = 0;
// time integration loop
while (t + EPS < t_end)
{
numSteps++;
int num_solved = 0;
while (num_solved < NUM)
{
int num_cond = min(NUM - num_solved, padded);
cudaErrorCheck( cudaMemcpy (host_mech->var, &var_host[num_solved],
num_cond * sizeof(double), cudaMemcpyHostToDevice));
//copy our memory into y_temp
memcpy2D_in(y_temp, padded, y_host, NUM,
num_solved, num_cond * sizeof(double), NSP);
// transfer memory to GPU
cudaErrorCheck( cudaMemcpy2D (host_mech->y, padded * sizeof(double),
y_temp, padded * sizeof(double),
num_cond * sizeof(double), NSP,
cudaMemcpyHostToDevice) );
intDriver <<< dimGrid, dimBlock, SHARED_SIZE >>> (num_cond, t, t_next, host_mech->var, host_mech->y, device_mech, device_solver);
#ifdef DEBUG
cudaErrorCheck( cudaPeekAtLastError() );
cudaErrorCheck( cudaDeviceSynchronize() );
#endif
// copy the result flag back
cudaErrorCheck( cudaMemcpy(result_flag, host_solver->result, num_cond * sizeof(int), cudaMemcpyDeviceToHost) );
check_error(num_cond, result_flag);
// transfer memory back to CPU
cudaErrorCheck( cudaMemcpy2D (y_temp, padded * sizeof(double),
host_mech->y, padded * sizeof(double),
num_cond * sizeof(double), NSP,
cudaMemcpyDeviceToHost) );
memcpy2D_out(y_host, NUM, y_temp, padded,
num_solved, num_cond * sizeof(double), NSP);
num_solved += num_cond;
}
t = t_next;
t_next = fmin(t_end, (numSteps + 1) * step);
}
}
/**
* \brief Cleans up the solver
*/
void accelerInt_cleanup() {
free_gpu_memory(&host_mech, &device_mech);
cleanup_solver(&host_solver, &device_solver);
free(y_temp);
free(host_mech);
free(host_solver);
free(result_flag);
cudaErrorCheck( cudaDeviceReset() );
}
#ifdef GENERATE_DOCS
}
#endif
|
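/*
 * Illustrative sketch (not part of the solver above): what the
 * memcpy2D_in/out helpers do -- gather a window of `num` IVPs out of a host
 * array whose rows are NUM entries wide into a buffer whose rows are
 * `padded` entries wide, one row per state variable. Host-only; the sizes
 * and values are made up for the example.
 */
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const int NUM = 5, padded = 8, NSP = 3;   // 5 IVPs, padded to 8, 3 state variables
  const int offset = 1, num = 3;            // copy IVPs 1..3
  std::vector<double> y_host(NUM * NSP), y_temp(padded * NSP, 0.0);
  for (int k = 0; k < NSP; ++k)
    for (int i = 0; i < NUM; ++i)
      y_host[k * NUM + i] = 100 * k + i;    // recognisable values

  // Equivalent of memcpy2D_in(y_temp, padded, y_host, NUM, offset, num * sizeof(double), NSP)
  for (int k = 0; k < NSP; ++k)
    std::memcpy(&y_temp[k * padded], &y_host[k * NUM + offset], num * sizeof(double));

  for (int k = 0; k < NSP; ++k)
    std::printf("row %d: %g %g %g\n", k,
                y_temp[k * padded], y_temp[k * padded + 1], y_temp[k * padded + 2]);
  return 0;
}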
ab461b47ea3f873d3f548d7548cb65f74299c4ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> h_in(idata, idata + n);
thrust::host_vector<int> h_out(odata, odata + n);
thrust::device_vector<int> dv_in = h_in;
thrust::device_vector<int> dv_out = h_out;
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
h_out = dv_out;
thrust::copy(h_out.begin(), h_out.end(), odata);
}
}
}
|
ab461b47ea3f873d3f548d7548cb65f74299c4ae.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> h_in(idata, idata + n);
thrust::host_vector<int> h_out(odata, odata + n);
thrust::device_vector<int> dv_in = h_in;
thrust::device_vector<int> dv_out = h_out;
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
h_out = dv_out;
thrust::copy(h_out.begin(), h_out.end(), odata);
}
}
}
|
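/*
 * Illustrative sketch (not part of the file above): what
 * thrust::exclusive_scan produces -- out[0] = 0 and out[i] = in[0] + ... +
 * in[i-1] -- next to an inclusive scan for contrast. Host-only; the sample
 * data is made up for the example.
 */
#include <cstdio>

int main() {
  const int n = 5;
  int in[n]   = {3, 1, 4, 1, 5};
  int excl[n], incl[n];
  int running = 0;
  for (int i = 0; i < n; ++i) {
    excl[i] = running;   // exclusive: sum of elements strictly before i
    running += in[i];
    incl[i] = running;   // inclusive: sum of elements up to and including i
  }
  for (int i = 0; i < n; ++i)
    std::printf("in=%d  exclusive=%d  inclusive=%d\n", in[i], excl[i], incl[i]);
  return 0;
}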
ac2235065b9dd63221919cbfcc3ceb6c55357dd9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Test the constant memory of thee DEVICE
* The constant memory does not work on NVIDIA GeForce GT 620 !!
*/
#if 1
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <iostream>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include "macro.h"
#include "type.h"
#include "redutil2.h"
using namespace redutil2;
// The Runge-Kutta matrix
var_t a[] =
{
1.0, -10.0,
0.5, -0.25
};
__constant__ var_t dc_a[sizeof(a) / sizeof(var_t)];
//! Print pointer and dereference it
__global__
void print_ptr(const var_t* a)
{
uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (0 == tid)
{
printf("%p\n", a);
printf("a[0] = %25.16le\n", a[0]);
printf("a[1] = %25.16le\n", a[1]);
printf("a[2] = %25.16le\n", a[2]);
printf("a[3] = %25.16le\n", a[3]);
}
}
int main()
{
try
{
var_t* d_a = NULL;
ALLOCATE_DEVICE_VECTOR((void**)&d_a, sizeof(a));
copy_vector_to_device(d_a, a, sizeof(a));
CUDA_SAFE_CALL(hipMemcpyToSymbol(dc_a, a, sizeof(a)));
//copy_constant_to_device(dc_a, a, sizeof(a));
printf("d_a:\n");
hipLaunchKernelGGL(( print_ptr), dim3(1), dim3(1), 0, 0, d_a);
CUDA_CHECK_ERROR();
printf("dc_a:\n");
hipLaunchKernelGGL(( print_ptr), dim3(1), dim3(1), 0, 0, dc_a);
CUDA_CHECK_ERROR();
FREE_DEVICE_VECTOR((void**)&d_a);
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << "." << std::endl;
}
return 0;
}
#endif
/*
* 2016.11.11. - 11.13. TEST OK
* Allocation of array of pointers
* Allocation of each element in the array
*/
#if 0
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "type.h"
#include "macro.h"
#include "redutil2.h"
using namespace redutil2;
namespace kernel_test
{
__global__
void print_array(int **a, uint32_t n_vct)
{
const int tid = threadIdx.x;
if (0 == tid)
{
for (uint32_t i = 0; i < n_vct; i++)
{
printf("[%u]: %p *(_+%u): %p\n", i, a[i], i, *(a+i));
}
}
}
__global__
void print_array(int *a, uint32_t n_arr)
{
const int tid = threadIdx.x;
if (0 == tid)
{
for (uint32_t i = 0; i < n_arr; i++)
{
printf("\t[%u]: %d\n", i, a[i]);
}
}
}
} /* kernel_test */
void print_array(int **a, uint32_t n_vct)
{
for (uint32_t i = 0; i < n_vct; i++)
{
printf(" +%u: %p\t", i, a+i);
printf("[%u]: %p *( +%u): %p\n", i, a[i], i, *(a+i));
}
}
void print_array(int *a, uint32_t n_arr)
{
for (uint32_t i = 0; i < n_arr; i++)
{
printf("\t[%u]: %d\n", i, a[i]);
}
}
int main()
{
static const uint32_t n_vct = 5;
static const uint32_t n_arr = 9;
int** h_k = NULL;
int** d_k = NULL;
int** tmp = NULL;
try
{
printf("h_k: %p\t", h_k);
// Allocate HOST memory
ALLOCATE_HOST_VECTOR((void**)&h_k, n_vct*sizeof(int*));
printf("after allocation: %p\n", h_k);
for (uint32_t i = 0; i < n_vct; i++)
{
printf("h_k[%u]: %p\t", i, h_k[i]);
ALLOCATE_HOST_VECTOR((void**)(h_k + i), n_arr*sizeof(int));
printf("after allocation: %p\n", h_k[i]);
print_array(*(h_k + i), n_arr);
}
printf("tmp: %p\t", tmp);
ALLOCATE_HOST_VECTOR((void**)&tmp, n_vct*sizeof(int*));
printf("after allocation: %p\n", tmp);
// Allocate DEVICE memory
printf("d_k: %p\t", d_k);
ALLOCATE_DEVICE_VECTOR((void**)(&d_k), n_vct*sizeof(int*));
printf("after allocation: %p\n", d_k);
for (uint32_t i = 0; i < n_vct; i++)
{
printf("tmp[%u]: %p\t", i, tmp[i]);
ALLOCATE_DEVICE_VECTOR((void**)(tmp + i), n_arr*sizeof(int));
printf("after allocation: %p\n", tmp[i]);
hipLaunchKernelGGL(( kernel_test::print_array), dim3(1), dim3(1), 0, 0, *(tmp + i), n_arr);
CUDA_CHECK_ERROR();
hipDeviceSynchronize();
}
CUDA_SAFE_CALL(hipMemcpy(d_k, tmp, n_vct * sizeof(int*), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_test::print_array), dim3(1), dim3(1), 0, 0, d_k, n_vct);
CUDA_CHECK_ERROR();
hipDeviceSynchronize();
// Populate data
for (uint32_t i = 0; i < n_vct; i++)
{
for (uint32_t j = 0; j < n_arr; j++)
{
*(*(h_k+i)+j) = i*10 + j;
}
printf("h_k[%u]: %p\n", i, h_k[i]);
print_array(*(h_k + i), n_arr);
printf("\n");
printf("tmp[%u]: %p\n", i, tmp[i]);
CUDA_SAFE_CALL(hipMemcpy(tmp[i], h_k[i], n_arr * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_test::print_array), dim3(1), dim3(1), 0, 0, tmp[i], n_arr);
CUDA_CHECK_ERROR();
hipDeviceSynchronize();
}
// Deallocate memory
for (uint32_t i = 0; i < n_vct; i++)
{
FREE_HOST_VECTOR((void**)(h_k + i));
FREE_DEVICE_VECTOR((void**)(tmp + i));
}
FREE_HOST_VECTOR((void**)&h_k);
FREE_HOST_VECTOR((void**)&tmp);
FREE_DEVICE_VECTOR((void**)&d_k);
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << std::endl;
}
return 0;
}
#endif
/*
* 2016.11.13. - 11.13. TEST OK
* Compute the linear combination of arrays on the DEVICE
* and compare the results with those computed on the HOST
*/
#if 0
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <time.h> /* time */
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "type.h"
#include "macro.h"
#include "redutil2.h"
using namespace redutil2;
namespace kernel_test
{
__global__
void print_array(var_t *a, uint32_t n_arr)
{
const int tid = threadIdx.x;
if (0 == tid)
{
for (uint32_t i = 0; i < n_arr; i++)
{
printf("\t[%u]: %g\n", i, a[i]);
}
}
}
//! Calculate the special case of linear combination of vectors, a[i] = b[i] + sum (coeff[j] * c[j][i])
/*
\param a vector which will contain the result
\param b vector to which the linear combination will be added
\param c vectors which will linear combined
\param coeff vector which contains the weights (coefficients)
\param n_vct the number of vectors to combine
\param n_var the number of elements in the vectors
*/
__global__
void calc_lin_comb_s(var_t* a, const var_t* b, const var_t* const *c, const var_t* coeff, uint16_t n_vct, uint32_t n_var)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n_var)
{
var_t d = 0.0;
for (uint16_t j = 0; j < n_vct; j++)
{
if (0.0 == coeff[j])
{
continue;
}
d += coeff[j] * c[j][tid];
}
a[tid] = b[tid] + d;
}
}
} /* kernel_test */
int main()
{
static const uint32_t n_vct = 5;
static const uint32_t n_arr = 3000;
var_t** h_k = NULL;
var_t** d_k = NULL;
var_t** tmp = NULL;
var_t* h_a = NULL;
var_t* h_a0 = NULL; // Will hold a copy of d_a
var_t* h_b = NULL;
var_t* h_coeff = NULL;
var_t* d_a = NULL;
var_t* d_b = NULL;
var_t* d_coeff = NULL;
try
{
// Allocate HOST memory
ALLOCATE_HOST_VECTOR((void**)&h_k, n_vct*sizeof(var_t*));
for (uint32_t i = 0; i < n_vct; i++)
{
ALLOCATE_HOST_VECTOR((void**)(h_k + i), n_arr*sizeof(var_t));
}
ALLOCATE_HOST_VECTOR((void**)&tmp, n_vct*sizeof(var_t*));
ALLOCATE_HOST_VECTOR((void**)&h_a, n_arr*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_a0, n_arr*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_b, n_arr*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_coeff, n_vct*sizeof(var_t));
// Allocate DEVICE memory
ALLOCATE_DEVICE_VECTOR((void**)(&d_k), n_vct*sizeof(var_t*));
for (uint32_t i = 0; i < n_vct; i++)
{
ALLOCATE_DEVICE_VECTOR((void**)(tmp + i), n_arr*sizeof(var_t));
}
CUDA_SAFE_CALL(hipMemcpy(d_k, tmp, n_vct * sizeof(var_t*), hipMemcpyHostToDevice));
ALLOCATE_DEVICE_VECTOR((void**)&d_a, n_arr*sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_b, n_arr*sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_coeff, n_vct*sizeof(var_t));
// Populate data
srand(time(NULL));
for (uint32_t i = 0; i < n_vct; i++)
{
for (uint32_t j = 0; j < n_arr; j++)
{
var_t r = (var_t)rand()/RAND_MAX; //returns a pseudo-random integer between 0 and RAND_MAX
*(*(h_k+i)+j) = r;
}
CUDA_SAFE_CALL(hipMemcpy(tmp[i], h_k[i], n_arr * sizeof(var_t), hipMemcpyHostToDevice));
}
for (uint32_t j = 0; j < n_arr; j++)
{
h_a[j] = 0;
h_b[j] = 0;
}
for (uint32_t j = 0; j < n_vct; j++)
{
h_coeff[j] = 1;
}
h_coeff[4] = -1;
CUDA_SAFE_CALL(hipMemcpy(d_a, h_a, n_arr * sizeof(var_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_b, h_b, n_arr * sizeof(var_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_coeff, h_coeff, n_vct * sizeof(var_t), hipMemcpyHostToDevice));
// Test the tools::calc_lin_comb_s() and gpu_calc_lin_comb_s() functions
// Compute a[i] = b[i] + f*c[i]
{
printf("Compute a[i] = b[i] + f*c[i]\n\n");
var_t f = 2.0;
var_t *h_c = *h_k;
tools::calc_lin_comb_s(h_a, h_b, h_c, f, n_arr);
var_t *d_c = *tmp;
gpu_calc_lin_comb_s( d_a, d_b, d_c, f, n_arr, 0, false);
//printf("h_a:\n");
//print_array("", n_arr, h_a, PROC_UNIT_CPU);
//printf("d_a:\n");
//print_array("", n_arr, d_a, PROC_UNIT_GPU);
CUDA_SAFE_CALL(hipMemcpy(h_a0, d_a, n_arr * sizeof(var_t), hipMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_arr; j++)
{
if (0 != fabs(h_a[j] - h_a0[j]))
{
printf("Difference: j = %6u : %g\n", j, h_a[j] - h_a0[j]);
}
}
}
// Test the tools::calc_lin_comb_s() and gpu_calc_lin_comb_s() functions
// Compute a[i] = b[i] + sum (coeff[j] * c[j][i])
{
printf("Compute a[i] = b[i] + sum (coeff[j] * c[j][i])\n\n");
tools::calc_lin_comb_s(h_a, h_b, h_k, h_coeff, n_vct, n_arr);
gpu_calc_lin_comb_s( d_a, d_b, d_k, d_coeff, n_vct, n_arr, 0, false);
CUDA_SAFE_CALL(hipMemcpy(h_a0, d_a, n_arr * sizeof(var_t), hipMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_arr; j++)
{
if (0 != fabs(h_a[j] - h_a0[j]))
{
printf("Difference: j = %6u : %g\n", j, h_a[j] - h_a0[j]);
}
}
}
// Deallocate memory
for (uint32_t i = 0; i < n_vct; i++)
{
FREE_HOST_VECTOR((void**)(h_k + i));
FREE_DEVICE_VECTOR((void**)(tmp + i));
}
FREE_HOST_VECTOR((void**)&h_k);
FREE_HOST_VECTOR((void**)&tmp);
FREE_DEVICE_VECTOR((void**)&d_k);
FREE_HOST_VECTOR((void**)&h_a);
FREE_HOST_VECTOR((void**)&h_a0);
FREE_HOST_VECTOR((void**)&h_b);
FREE_HOST_VECTOR((void**)&h_coeff);
FREE_DEVICE_VECTOR((void**)&d_a);
FREE_DEVICE_VECTOR((void**)&d_b);
FREE_DEVICE_VECTOR((void**)&d_coeff);
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << std::endl;
}
std::cout << "Compute the linear combination of arrays on the DEVICE and comapre the results those computed on the HOST done.\n";
return 0;
}
#endif
/*
* 2016.11.14. -
* Gravitational interaction computations
*/
#if 0
/*
Premature optimization is the ROOT OF ALL EVIL. Always remember the three rules of optimization!
1. Don't optimize.
2. If you are an expert, see rule #1
3. If you are an expert and can justify the need, then use the following procedure:
- Code it unoptimized
- determine how fast is "Fast enough"--Note which user requirement/story requires that metric.
- Write a speed test
- Test existing code--If it's fast enough, you're done.
- Recode it optimized
- Test optimized code. IF it doesn't meet the metric, throw it away and keep the original.
- If it meets the test, keep the original code in as comments
*/
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <time.h> /* time */
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "type.h"
#include "macro.h"
#include "redutil2.h"
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#include <ctime>
#endif
using namespace redutil2;
// Global variables
uint32_t n_tpb = 128;
uint32_t n_obj = 0;
var_t* h_p = NULL;
var_t* d_p = NULL;
dim3 grid;
dim3 block;
namespace nbody_kernel
{
__global__
void calc_gravity_accel_naive(uint32_t n_obj, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (n_obj > i)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j)
{
continue;
}
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
__global__
void calc_gravity_accel_naive_sym(uint32_t n_obj, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (n_obj > i)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = i+1; j < n_obj; j++)
{
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
s = p[i].mass * d_3;
a[j].x -= s * r_ij.x;
a[j].y -= s * r_ij.y;
a[j].z -= s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
inline __host__ __device__
var3_t body_body_interaction(var3_t riVec, var3_t rjVec, var_t mj, var3_t aiVec)
{
var3_t dVec = {0.0, 0.0, 0.0};
// compute d = r_i - r_j [3 FLOPS] [6 read, 3 write]
dVec.x = rjVec.x - riVec.x;
dVec.y = rjVec.y - riVec.y;
dVec.z = rjVec.z - riVec.z;
// compute norm square of d vector [5 FLOPS] [3 read, 1 write]
var_t r2 = SQR(dVec.x) + SQR(dVec.y) + SQR(dVec.z);
// compute norm of d vector [1 FLOPS] [1 read, 1 write] TODO: how long does it take to compute sqrt ???
var_t r = sqrt(r2);
// compute m_j / d^3 []
var_t s = mj * 1.0 / (r2 * r);
aiVec.x += s * dVec.x;
aiVec.y += s * dVec.y;
aiVec.z += s * dVec.z;
return aiVec;
}
__global__
void calc_gravity_accel_tile(interaction_bound int_bound, int tile_size, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
extern __shared__ var3_t sh_pos[];
var3_t my_pos = {0.0, 0.0, 0.0};
var3_t acc = {0.0, 0.0, 0.0};
// i is the index of the SINK body
const uint32_t i = int_bound.sink.x + blockIdx.x * blockDim.x + threadIdx.x;
// To avoid overrunning the r buffer
if (int_bound.sink.y > i)
{
my_pos = r[i];
}
for (int tile = 0; (tile * tile_size) < int_bound.source.y; tile++)
{
// src_idx is the index of the SOURCE body in the tile
int src_idx = int_bound.source.x + tile * tile_size + threadIdx.x;
// To avoid overrunning the r buffer
if (int_bound.source.y > src_idx)
{
sh_pos[threadIdx.x] = r[src_idx];
}
__syncthreads();
// j is the index of the SOURCE body in the current tile
for (int j = 0; j < blockDim.x; j++)
{
// To avoid overrunning the mass buffer
if (int_bound.source.y <= int_bound.source.x + (tile * tile_size) + j)
{
break;
}
// To avoid self-interaction (and, mathematically, division by zero)
if (i != int_bound.source.x + (tile * tile_size)+j)
{
acc = body_body_interaction(my_pos, sh_pos[j], p[src_idx].mass, acc);
}
}
__syncthreads();
}
// To avoid overrunning the a buffer
if (int_bound.sink.y > i)
{
a[i] = acc;
}
}
} /* nbody_kernel */
/*
* -- Returns the amount of milliseconds elapsed since the UNIX epoch. Works on both --
* Returns the amount of microseconds elapsed since the UNIX epoch. Works on both
* windows and linux.
*/
uint64_t GetTimeMs64()
{
#ifdef _WIN32
/* Windows */
FILETIME ft;
LARGE_INTEGER li;
/* Get the amount of 100 nano seconds intervals elapsed since January 1, 1601 (UTC) and copy it
* to a LARGE_INTEGER structure. */
GetSystemTimeAsFileTime(&ft);
li.LowPart = ft.dwLowDateTime;
li.HighPart = ft.dwHighDateTime;
uint64_t ret = li.QuadPart;
ret -= 116444736000000000LL; /* Convert from file time to UNIX epoch time. */
//ret /= 10000; /* From 100 nano seconds (10^-7) to 1 millisecond (10^-3) intervals */
ret /= 10; /* From 100 nano seconds (10^-7) to 1 microsecond (10^-6) intervals */
return ret;
#else
/* Linux */
struct timeval tv;
gettimeofday(&tv, NULL);
uint64 ret = tv.tv_usec;
/* Convert from micro seconds (10^-6) to milliseconds (10^-3) */
//ret /= 1000;
/* Adds the seconds (10^0) after converting them to milliseconds (10^-3) */
//ret += (tv.tv_sec * 1000);
/* Adds the seconds (10^0) after converting them to microseconds (10^-6) */
ret += (tv.tv_sec * 1000000);
return ret;
#endif
}
float gpu_calc_dy(uint32_t n_var, uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy, bool use_symm_prop)
{
set_kernel_launch_param(n_var, n_tpb, grid, block);
printf(" grid: (%4u, %4u, %4u)\n", grid.x, grid.y, grid.z);
printf("block: (%4u, %4u, %4u)\n", block.x, block.y, block.z);
var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
nbp_t::param_t* p = (nbp_t::param_t*)d_p;
hipEvent_t t0, t1;
CUDA_SAFE_CALL(hipEventCreate(&t0));
CUDA_SAFE_CALL(hipEventCreate(&t1));
CUDA_SAFE_CALL(hipEventRecord(t0));
// Clear the acceleration array: the += op can be used
CUDA_SAFE_CALL(hipMemset(a, 0, n_obj*sizeof(var3_t)));
// Copy the velocities into dy
// TODO: implement the asynchronous version of hipMemcpy: Performance ??
CUDA_SAFE_CALL(hipMemcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t), hipMemcpyDeviceToDevice));
if (false == use_symm_prop)
{
hipLaunchKernelGGL(( nbody_kernel::calc_gravity_accel_naive), dim3(grid), dim3(block), 0, 0, n_obj, r, p, a);
}
else
{
hipLaunchKernelGGL(( nbody_kernel::calc_gravity_accel_naive_sym), dim3(grid), dim3(block), 0, 0, n_obj, r, p, a);
}
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipEventRecord(t1));
CUDA_SAFE_CALL(hipEventSynchronize(t1));
float dt = 0.0f;
CUDA_SAFE_CALL(hipEventElapsedTime(&dt, t0, t1));
return dt;
}
float gpu_calc_grav_accel_tile(uint32_t n_var, uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy)
{
set_kernel_launch_param(n_var, n_tpb, grid, block);
printf(" grid: (%4u, %4u, %4u)\n", grid.x, grid.y, grid.z);
printf("block: (%4u, %4u, %4u)\n", block.x, block.y, block.z);
uint2_t sink = {0, n_obj};
uint2_t source = {0, n_obj};
interaction_bound int_bound(sink, source);
var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
nbp_t::param_t* p = (nbp_t::param_t*)d_p;
hipEvent_t t0, t1;
CUDA_SAFE_CALL(hipEventCreate(&t0));
CUDA_SAFE_CALL(hipEventCreate(&t1));
CUDA_SAFE_CALL(hipEventRecord(t0));
// Clear the acceleration array: the += op can be used
CUDA_SAFE_CALL(hipMemset(a, 0, n_obj*sizeof(var3_t)));
// Copy the velocities into dy
// TODO: implement the asynchronous version of hipMemcpy: Performance ??
CUDA_SAFE_CALL(hipMemcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t), hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( nbody_kernel::calc_gravity_accel_tile), dim3(grid), dim3(block), n_tpb * sizeof(var3_t), 0, int_bound, n_tpb, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipEventRecord(t1, 0));
CUDA_SAFE_CALL(hipEventSynchronize(t1));
float elapsed_time = 0.0f;
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsed_time, t0, t1));
return elapsed_time;
}
void cpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy, bool use_symm_prop)
{
// Copy the velocities into dy
memcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t));
var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
// Clear the acceleration array: the += op can be used
memset(a, 0, 3*n_obj*sizeof(var_t));
nbp_t::param_t* p = (nbp_t::param_t*)h_p;
if (use_symm_prop)
{
for (uint32_t i = 0; i < n_obj; i++)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = i+1; j < n_obj; j++)
{
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
s = p[i].mass * d_3;
a[j].x -= s * r_ij.x;
a[j].y -= s * r_ij.y;
a[j].z -= s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
else
{
for (uint32_t i = 0; i < n_obj; i++)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j)
{
continue;
}
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
}
void parse(int argc, const char** argv, uint32_t* n_obj)
{
int i = 1;
if (1 >= argc)
{
throw std::string("Missing command line arguments. For help use -h.");
}
while (i < argc)
{
std::string p = argv[i];
if ( p == "-n")
{
i++;
if (!tools::is_number(argv[i]))
{
throw std::string("Invalid number at: " + p);
}
*n_obj = atoi(argv[i]);
}
else
{
throw std::string("Invalid switch on command line: " + p + ".");
}
i++;
}
}
int main(int argc, const char *argv[])
{
var_t* h_y = NULL;
var_t* h_dy = NULL;
var_t* h_dy0 = NULL;
var_t* d_y = NULL;
var_t* d_dy = NULL;
uint32_t n_var = 0;
uint32_t n_par = 0;
try
{
// n_obj is a global variable
parse(argc, argv, &n_obj);
n_var = 6 * n_obj;
n_par = 1 * n_obj;
// Allocate HOST memory
ALLOCATE_HOST_VECTOR((void**)&h_y, n_var * sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_dy, n_var * sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_dy0, n_var * sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_p, n_par * sizeof(var_t));
// Allocate DEVICE memory
ALLOCATE_DEVICE_VECTOR((void**)&d_y, n_var * sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_dy, n_var * sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_p, n_par * sizeof(var_t));
// Populate data
srand(time(NULL));
for (uint32_t i = 0; i < n_var; i++)
{
var_t r = (var_t)rand()/RAND_MAX;
*(h_y + i) = r;
}
for (uint32_t i = 0; i < n_par; i++)
{
var_t r = (var_t)rand()/RAND_MAX;
*(h_p + i) = 1;
}
CUDA_SAFE_CALL(hipMemcpy(d_y, h_y, n_var * sizeof(var_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_p, h_p, n_par * sizeof(var_t), hipMemcpyHostToDevice));
var_t t0 = 0.0;
uint16_t stage = 0;
uint64_t T0 = GetTimeMs64();
cpu_calc_dy(stage, t0, h_y, h_dy, false);
uint64_t T1 = GetTimeMs64();
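// GetTimeMs64() returns microseconds, so dividing by 1000 yields milliseconds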
var_t DT_CPU = ((var_t)(T1 - T0))/1000.0f;
printf("CPU execution time: %16.4e [ms]\n", DT_CPU);
T0 = GetTimeMs64();
cpu_calc_dy(stage, t0, h_y, h_dy0, true);
T1 = GetTimeMs64();
DT_CPU = ((var_t)(T1 - T0))/1000.0f;
printf("CPU execution time: %16.4e [ms]\n", DT_CPU);
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
T0 = GetTimeMs64();
float _DT_GPU = gpu_calc_dy(n_var, stage, t0, d_y, d_dy, false);
T1 = GetTimeMs64();
var_t DT_GPU = ((var_t)(T1 - T0))/1000.0f;
printf("GPU execution time: %16.4e [ms]\n", DT_GPU);
printf("GPU execution time: %16.4e [ms]\n", _DT_GPU);
printf("%10u %16.4e %16.4e %16.4e %16.4e\n", n_obj, DT_CPU, DT_GPU, _DT_GPU, DT_CPU/_DT_GPU);
// Copy down the data from the DEVICE
CUDA_SAFE_CALL(hipMemcpy(h_dy0, d_dy, n_var * sizeof(var_t), hipMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
T0 = GetTimeMs64();
_DT_GPU = gpu_calc_dy(n_var, stage, t0, d_y, d_dy, true);
T1 = GetTimeMs64();
DT_GPU = ((var_t)(T1 - T0))/1000.0f;
printf("GPU execution time: %16.4e [ms]\n", DT_GPU);
printf("GPU execution time: %16.4e [ms]\n", _DT_GPU);
printf("%10u %16.4e %16.4e %16.4e %16.4e\n", n_obj, DT_CPU, DT_GPU, _DT_GPU, DT_CPU/_DT_GPU);
// Copy down the data from the DEVICE
CUDA_SAFE_CALL(hipMemcpy(h_dy0, d_dy, n_var * sizeof(var_t), hipMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
T0 = GetTimeMs64();
_DT_GPU = gpu_calc_grav_accel_tile(n_var, stage, t0, d_y, d_dy);
T1 = GetTimeMs64();
DT_GPU = ((var_t)(T1 - T0))/1000.0f;
printf("GPU execution time: %16.4e [ms]\n", DT_GPU);
printf("GPU execution time: %16.4e [ms]\n", _DT_GPU);
printf("%10u %16.4e %16.4e %16.4e %16.4e\n", n_obj, DT_CPU, DT_GPU, _DT_GPU, DT_CPU/_DT_GPU);
// Copy down the data from the DEVICE
CUDA_SAFE_CALL(hipMemcpy(h_dy0, d_dy, n_var * sizeof(var_t), hipMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
FREE_HOST_VECTOR((void**)&h_y );
FREE_HOST_VECTOR((void**)&h_dy );
FREE_HOST_VECTOR((void**)&h_dy0);
FREE_HOST_VECTOR((void**)&h_p );
FREE_DEVICE_VECTOR((void**)&d_y );
FREE_DEVICE_VECTOR((void**)&d_dy);
FREE_DEVICE_VECTOR((void**)&d_p );
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << std::endl;
}
std::cout << "Gravitational interaction computations done.\n";
return 0;
}
#endif
#if 0
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <time.h> /* time */
#include <iostream>
#include <string>
#include "constants.h"
#include "type.h"
#include "redutil2.h"
using namespace std;
using namespace redutil2;
int comp_value(var_t v1, var_t v2, var_t tol, char* lpad, char* text)
{
int result = 0;
var_t d = fabs(v1 - v2);
if (tol < d)
{
printf("%s%s = %25.15lg\n", lpad, text, d);
result = 1;
}
return result;
}
int comp_oe(orbelem_t &oe1, orbelem_t& oe2, var_t tol, char* lpad)
{
int result = comp_value(oe1.sma, oe2.sma, tol, lpad, "Abs(Delta(sma ))");
result += comp_value(oe1.ecc, oe2.ecc, tol, lpad, "Abs(Delta(ecc ))");
result += comp_value(oe1.inc, oe2.inc, tol, lpad, "Abs(Delta(inc ))");
result += comp_value(oe1.peri, oe2.peri, tol, lpad, "Abs(Delta(peri))");
result += comp_value(oe1.node, oe2.node, tol, lpad, "Abs(Delta(node))");
result += comp_value(oe1.mean, oe2.mean, tol, lpad, "Abs(Delta(mean))");
return result;
}
int comp_2D_vectors(var2_t &v1, var2_t &v2, var_t tol, char* lpad)
{
int result = comp_value(v1.x, v2.x, tol, lpad, "Abs(Delta(v1.x - v2.x))");
result += comp_value(v1.y, v2.y, tol, lpad, "Abs(Delta(v1.y - v2.y))");
return result;
}
var_t random(var_t x0, var_t x1)
{
return (x0 + ((var_t)rand() / RAND_MAX) * (x1 - x0));
}
void test_calc_ephemeris()
{
// Test calculate phase from orbital elements and vice versa
{
const char func_name[] = "calc_phase";
char lpad[] = " ";
/*
* The units are:
* Unit name | Unit symbol | Quantity name
* -----------------------------------------------
* Astronomical unit | AU | length
* Solar mass | S | mass
* Mean solar day | D | time
*/
srand((unsigned int)time(NULL));
// parameter of the problem
tbp_t::param_t p;
// Set the parameter of the problem
p.mu = constants::Gauss2 * (1.0 + 1.0);
// Set the initial orbital elements
orbelem_t oe1 = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
orbelem_t oe2 = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
var3_t r0 = {0, 0, 0};
var3_t v0 = {0, 0, 0};
var_t tol = 1.0e-14;
for (int i = 0; i < 100; i++)
{
oe1.sma = random(0.1, 10.0);
oe1.ecc = random(0.0, 0.8);
oe1.inc = random(0.0, PI);
oe1.peri =random(0.0, TWOPI);
oe1.node =random(0.0, TWOPI);
oe1.mean =random(0.0, TWOPI);
// Calculate the position and velocity vectors from orbital elements
tools::calc_phase(p.mu, &oe1, &r0, &v0);
// Calculate the orbital elements from position and velocity vectors
tools::calc_oe(p.mu, &r0, &v0, &oe2);
int ret_val = comp_oe(oe1, oe2, tol, lpad);
if (0 < ret_val)
{
printf(" TEST '%s' failed with tolerance level: %g\n", func_name, tol);
}
else
{
printf(" TEST '%s' passed with tolerance level: %g\n", func_name, tol);
}
}
} /* Test calc_phase() and calc_oe() functions */
}
void test_rtbp2d_calc_energy()
{
// Test tools::tbp::calc_integral() and tools::rtbp2D::calc_integral() functions
{
const char func_name[] = "tools::tbp::calc_integral";
char lpad[] = " ";
/*
* The units are:
* Unit name | Unit symbol | Quantity name
* -----------------------------------------------
* Astronomical unit | AU | length
* Solar mass | S | mass
* Mean solar day | D | time
*/
srand(0);
orbelem_t oe = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
var3_t r0 = {0, 0, 0};
var3_t v0 = {0, 0, 0};
var_t mu = constants::Gauss2*(1.0 + 1.0);
var_t tol = 1.0e-15;
for (int i = 0; i < 10; i++)
{
// Set the initial orbital elements
oe.sma = random(0.1, 10.0);
oe.ecc = random(0.0, 0.8);
oe.inc = 0.0;
oe.peri = random(0.0, TWOPI);
oe.node = 0.0;
oe.mean = random(0.0, TWOPI);
// Calculate the position and velocity vectors from orbital elements
tools::calc_phase(mu, &oe, &r0, &v0);
// Set the starting coordinate and velocity vectors
var2_t r = {r0.x, r0.y};
var2_t v = {v0.x, v0.y};
var2_t u = {0, 0};
var2_t up = {0, 0};
tools::rtbp2D::transform_x2u(r, u);
tools::rtbp2D::transform_xd2up(u, v, up);
var_t hs = tools::tbp::calc_integral(mu, r, v);
var_t hr = tools::rtbp2D::calc_integral(mu, u, up);
printf(" hs = %25.15le\n", hs);
printf(" hr = %25.15le\n", hr);
}
// Calculate the energy along a Kepler-orbit
oe.sma = 1.5;
oe.ecc = 0.8;
oe.inc = 0.0;
oe.peri = 0.0;
oe.node = 0.0;
oe.mean = 0.0;
do
{
tools::calc_phase(mu, &oe, &r0, &v0);
var2_t r = {r0.x, r0.y};
var2_t v = {v0.x, v0.y};
var2_t u = {0, 0};
var2_t up = {0, 0};
tools::rtbp2D::transform_x2u(r, u);
tools::rtbp2D::transform_xd2up(u, v, up);
var_t hs = tools::tbp::calc_integral(mu, r, v);
var_t hr = tools::rtbp2D::calc_integral(mu, u, up);
printf("%25.15le %25.15le %25.15le\n", oe.mean, hs, hr);
oe.mean += 1.0 * constants::DegreeToRadian;
} while (oe.mean <= TWOPI);
} /* Test tools::rtbp2D::transform_x2u() and tools::rtbp2D::transform_u2x() functions */
}
void test_rtbp2d_transform()
{
// Test square (section lines)
{
var_t d = 0.01;
// Q4 -> Q1
var2_t x = {0.5, -0.5};
var2_t u = {0, 0};
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.y += d;
} while (0.5 >= x.y);
// Q1 -> Q2
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.x -= d;
} while (-0.5 <= x.x);
// Q2 -> Q3
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.y -= d;
} while (-0.5 <= x.y);
// Q3 -> Q4
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.x += d;
} while (0.5 >= x.x);
}
return;
// Test ellipse
{
const char func_name[] = "tools::rtbp2D::transform___";
char lpad[] = " ";
/*
* The units are:
* Unit name | Unit symbol | Quantity name
* -----------------------------------------------
* Astronomical unit | AU | length
* Solar mass | S | mass
* Mean solar day | D | time
*/
srand(0);
const var_t mu = constants::Gauss2*(1.0 + 1.0);
orbelem_t oe = {0.5, 0.8, 0.0, 0.0, 0.0, 0.0};
var3_t r0 = {0, 0, 0};
var3_t v0 = {0, 0, 0};
int i = 0;
do
{
oe.mean = i * constants::DegreeToRadian;
tools::calc_phase(mu, &oe, &r0, &v0);
var2_t x = {r0.x, r0.y};
var2_t xd = {v0.x, v0.y};
var2_t u = {0, 0};
var2_t up = {0, 0};
tools::rtbp2D::transform_x2u(x, u);
tools::rtbp2D::transform_xd2up(u, xd, up);
x.x = x.y = 0.0;
xd.x = xd.y = 0.0;
tools::rtbp2D::transform_u2x(u, x);
tools::rtbp2D::transform_up2xd(u, up, xd);
// Compare the original position and velocity vectors with the calculated ones
{
var_t tol = 1.0e-15;
var2_t x0 = {r0.x, r0.y};
var2_t x0d = {v0.x, v0.y};
comp_2D_vectors(x0, x, tol, lpad);
comp_2D_vectors(x0d, xd, tol, lpad);
}
printf("%23.15le %23.15le %23.15le %23.15le %23.15le %23.15le %23.15le %23.15le %23.15le\n", oe.mean, x.x, x.y, u.x, u.y, xd.x, xd.y, up.x, up.y);
if (0 < i && 0 == (i+1) % 90)
{
printf("\n");
}
i++;
} while (360 > i);
} /* Test tools::rtbp2D::transform_x2u() and tools::rtbp2D::transform_u2x() functions */
}
void test_calc_lin_comb()
{
// Test calculate linear combination of vectors
{
const char func_name[] = "calc_lin_comb";
char lpad[] = " ";
uint32_t n_var = 2;
uint16_t n_vct = 3;
var_t* a = NULL;
var_t* b = NULL;
var_t** c = NULL;
var_t* coeff = NULL;
ALLOCATE_HOST_VECTOR((void**)&(a), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&(b), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&c, n_vct*sizeof(var_t*));
for (uint16_t i = 0; i < n_vct; i++)
{
ALLOCATE_HOST_VECTOR((void**)&(c[i]), n_var*sizeof(var_t));
}
ALLOCATE_HOST_VECTOR((void**)&coeff, n_vct*sizeof(var_t));
// Populate vectors
memset(a, 0, n_var*sizeof(var_t));
for (int i = 0; i < n_var; i++)
{
b[i] = -(i+1);
}
for (uint32_t i = 0; i < n_vct; i++)
{
for (uint32_t j = 0; j < n_var; j++)
{
c[i][j] = i+j+1;
}
}
for (int i = 0; i < n_vct; i++)
{
coeff[i] = 10*i;
}
printf("The data in the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
printf("b:\n");
print_array("", n_var, b, PROC_UNIT_CPU);
for (uint32_t i = 0; i < n_vct; i++)
{
printf("c[%d]:\n", i);
print_array("", n_var, c[i], PROC_UNIT_CPU);
}
printf("The coefficients:\n");
print_array("", n_vct, coeff, PROC_UNIT_CPU);
// Calculate the linear combination of the vectors
tools::calc_lin_comb(a, c, coeff, n_vct, n_var);
printf("The linear combination of the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
// Calculate the special case of linear combination of the vectors
tools::calc_lin_comb_s(a, b, c, coeff, n_vct, n_var);
printf("The special linear combination of the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
FREE_HOST_VECTOR((void **)&(coeff));
for (uint16_t i = 0; i < n_vct; i++)
{
FREE_HOST_VECTOR((void **)&(c[i]));
}
FREE_HOST_VECTOR((void **)&(c));
FREE_HOST_VECTOR((void **)&(b));
FREE_HOST_VECTOR((void **)&(a));
}
// Test calculate linear combination of two vectors
{
const char func_name[] = "calc_lin_comb_s";
char lpad[] = " ";
uint32_t n_var = 2;
var_t* a = NULL;
var_t* b = NULL;
var_t* c = NULL;
var_t f = 3;
ALLOCATE_HOST_VECTOR((void**)&(a), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&(b), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&(c), n_var*sizeof(var_t));
// Populate vectors
memset(a, 0, n_var*sizeof(var_t));
for (int i = 0; i < n_var; i++)
{
b[i] = -(i+1);
c[i] = i+1;
}
printf("The data in the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
printf("b:\n");
print_array("", n_var, b, PROC_UNIT_CPU);
printf("c:\n");
print_array("", n_var, c, PROC_UNIT_CPU);
printf("The coefficient:\n");
printf("%5e\n", f);
// Calculate the special case of linear combination of the vectors
tools::calc_lin_comb_s(a, b, c, f, n_var);
printf("The special linear combination of two vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
FREE_HOST_VECTOR((void **)&(c));
FREE_HOST_VECTOR((void **)&(b));
FREE_HOST_VECTOR((void **)&(a));
}
}
/*
cd 'C:\Work\red.cuda.Results\v2.0\Test_Copy\rtbp2D\Test_transform'
a=1.0
p [-1:1][-1:1]'e_0.0_q1.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.0_q2.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.0_q3.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.0_q4.txt' u 2:3 w l, '' u 4:5 w l
a=0.05
p [-a:a][-a:a]'e_0.0_q1.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.0_q2.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.0_q3.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.0_q4.txt' u 6:7 w l, '' u 8:9 w l
a=1.0
p [-1:1][-1:1]'e_0.2_q1.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.2_q2.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.2_q3.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.2_q4.txt' u 2:3 w l, '' u 4:5 w l
a=0.05
p [-a:a][-a:a]'e_0.2_q1.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.2_q2.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.2_q3.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.2_q4.txt' u 6:7 w l, '' u 8:9 w l
a=1.0
p [-1:1][-1:1]'e_0.8_q1.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.8_q2.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.8_q3.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.8_q4.txt' u 2:3 w l, '' u 4:5 w l
a=0.05
p [-a:a][-a:a]'e_0.8_q1.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.8_q2.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.8_q3.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.8_q4.txt' u 6:7 w l, '' u 8:9 w l
*/
int main()
{
try
{
//test_calc_ephemeris();
//test_rtbp2d_trans();
//test_rtbp2d_transform();
//test_rtbp2d_calc_energy();
test_calc_lin_comb();
}
catch (const string& msg)
{
cerr << "Error: " << msg << endl;
}
return 0;
}
#endif
|
ac2235065b9dd63221919cbfcc3ceb6c55357dd9.cu
|
/*
 * Test the constant memory of the DEVICE
* The constant memory does not work on NVIDIA GeForce GT 620 !!
*/
#if 1
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <iostream>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include "macro.h"
#include "type.h"
#include "redutil2.h"
using namespace redutil2;
// The Runge-Kutta matrix
var_t a[] =
{
1.0, -10.0,
0.5, -0.25
};
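// dc_a mirrors the Runge-Kutta matrix a[] in constant memory; below it is passed directly
// as a kernel pointer argument (instead of being resolved via cudaGetSymbolAddress),
// which is the behaviour this test exercises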
__constant__ var_t dc_a[sizeof(a) / sizeof(var_t)];
//! Print pointer and dereference it
__global__
void print_ptr(const var_t* a)
{
uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (0 == tid)
{
printf("%p\n", a);
printf("a[0] = %25.16le\n", a[0]);
printf("a[1] = %25.16le\n", a[1]);
printf("a[2] = %25.16le\n", a[2]);
printf("a[3] = %25.16le\n", a[3]);
}
}
int main()
{
try
{
var_t* d_a = NULL;
ALLOCATE_DEVICE_VECTOR((void**)&d_a, sizeof(a));
copy_vector_to_device(d_a, a, sizeof(a));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(dc_a, a, sizeof(a)));
//copy_constant_to_device(dc_a, a, sizeof(a));
printf("d_a:\n");
print_ptr<<<1, 1>>>(d_a);
CUDA_CHECK_ERROR();
printf("dc_a:\n");
print_ptr<<<1, 1>>>(dc_a);
CUDA_CHECK_ERROR();
FREE_DEVICE_VECTOR((void**)&d_a);
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << "." << std::endl;
}
return 0;
}
#endif
/*
* 2016.11.11. - 11.13. TEST OK
* Allocation of array of pointers
* Allocation of each element in the array
*/
#if 0
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "type.h"
#include "macro.h"
#include "redutil2.h"
using namespace redutil2;
namespace kernel_test
{
__global__
void print_array(int **a, uint32_t n_vct)
{
const int tid = threadIdx.x;
if (0 == tid)
{
for (uint32_t i = 0; i < n_vct; i++)
{
printf("[%u]: %p *(_+%u): %p\n", i, a[i], i, *(a+i));
}
}
}
__global__
void print_array(int *a, uint32_t n_arr)
{
const int tid = threadIdx.x;
if (0 == tid)
{
for (uint32_t i = 0; i < n_arr; i++)
{
printf("\t[%u]: %d\n", i, a[i]);
}
}
}
} /* kernel_test */
void print_array(int **a, uint32_t n_vct)
{
for (uint32_t i = 0; i < n_vct; i++)
{
printf(" +%u: %p\t", i, a+i);
printf("[%u]: %p *( +%u): %p\n", i, a[i], i, *(a+i));
}
}
void print_array(int *a, uint32_t n_arr)
{
for (uint32_t i = 0; i < n_arr; i++)
{
printf("\t[%u]: %d\n", i, a[i]);
}
}
int main()
{
static const uint32_t n_vct = 5;
static const uint32_t n_arr = 9;
int** h_k = NULL;
int** d_k = NULL;
int** tmp = NULL;
try
{
printf("h_k: %p\t", h_k);
// Allocate HOST memory
ALLOCATE_HOST_VECTOR((void**)&h_k, n_vct*sizeof(int*));
printf("after allocation: %p\n", h_k);
for (uint32_t i = 0; i < n_vct; i++)
{
printf("h_k[%u]: %p\t", i, h_k[i]);
ALLOCATE_HOST_VECTOR((void**)(h_k + i), n_arr*sizeof(int));
printf("after allocation: %p\n", h_k[i]);
print_array(*(h_k + i), n_arr);
}
printf("tmp: %p\t", tmp);
ALLOCATE_HOST_VECTOR((void**)&tmp, n_vct*sizeof(int*));
printf("after allocation: %p\n", tmp);
// Allocate DEVICE memory
printf("d_k: %p\t", d_k);
ALLOCATE_DEVICE_VECTOR((void**)(&d_k), n_vct*sizeof(int*));
printf("after allocation: %p\n", d_k);
for (uint32_t i = 0; i < n_vct; i++)
{
printf("tmp[%u]: %p\t", i, tmp[i]);
ALLOCATE_DEVICE_VECTOR((void**)(tmp + i), n_arr*sizeof(int));
printf("after allocation: %p\n", tmp[i]);
kernel_test::print_array<<<1, 1>>>(*(tmp + i), n_arr);
CUDA_CHECK_ERROR();
cudaThreadSynchronize();
}
CUDA_SAFE_CALL(cudaMemcpy(d_k, tmp, n_vct * sizeof(int*), cudaMemcpyHostToDevice));
kernel_test::print_array<<<1, 1>>>(d_k, n_vct);
CUDA_CHECK_ERROR();
cudaThreadSynchronize();
// Populate data
for (uint32_t i = 0; i < n_vct; i++)
{
for (uint32_t j = 0; j < n_arr; j++)
{
*(*(h_k+i)+j) = i*10 + j;
}
printf("h_k[%u]: %p\n", i, h_k[i]);
print_array(*(h_k + i), n_arr);
printf("\n");
printf("tmp[%u]: %p\n", i, tmp[i]);
CUDA_SAFE_CALL(cudaMemcpy(tmp[i], h_k[i], n_arr * sizeof(int), cudaMemcpyHostToDevice));
kernel_test::print_array<<<1, 1>>>(tmp[i], n_arr);
CUDA_CHECK_ERROR();
cudaThreadSynchronize();
}
// Deallocate memory
for (uint32_t i = 0; i < n_vct; i++)
{
FREE_HOST_VECTOR((void**)(h_k + i));
FREE_DEVICE_VECTOR((void**)(tmp + i));
}
FREE_HOST_VECTOR((void**)&h_k);
FREE_HOST_VECTOR((void**)&tmp);
FREE_DEVICE_VECTOR((void**)&d_k);
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << std::endl;
}
return 0;
}
#endif
/*
* 2016.11.13. - 11.13. TEST OK
* Compute the linear combination of arrays on the DEVICE
* and comapre the results those computed on the HOST
*/
#if 0
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <time.h> /* time */
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "type.h"
#include "macro.h"
#include "redutil2.h"
using namespace redutil2;
namespace kernel_test
{
__global__
void print_array(var_t *a, uint32_t n_arr)
{
const int tid = threadIdx.x;
if (0 == tid)
{
for (uint32_t i = 0; i < n_arr; i++)
{
printf("\t[%u]: %g\n", i, a[i]);
}
}
}
//! Calculate the special case of linear combination of vectors, a[i] = b[i] + sum (coeff[j] * c[j][i])
/*
\param a vector which will contain the result
\param b vector to which the linear combination will be added
\param c vectors which will linear combined
\param coeff vector which contains the weights (coefficients)
\param n_vct the number of vectors to combine
\param n_var the number of elements in the vectors
*/
__global__
void calc_lin_comb_s(var_t* a, const var_t* b, const var_t* const *c, const var_t* coeff, uint16_t n_vct, uint32_t n_var)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n_var)
{
var_t d = 0.0;
for (uint16_t j = 0; j < n_vct; j++)
{
if (0.0 == coeff[j])
{
continue;
}
d += coeff[j] * c[j][tid];
}
a[tid] = b[tid] + d;
}
}
} /* kernel_test */
int main()
{
static const uint32_t n_vct = 5;
static const uint32_t n_arr = 3000;
var_t** h_k = NULL;
var_t** d_k = NULL;
var_t** tmp = NULL;
var_t* h_a = NULL;
var_t* h_a0 = NULL; // Will hold a copy of d_a
var_t* h_b = NULL;
var_t* h_coeff = NULL;
var_t* d_a = NULL;
var_t* d_b = NULL;
var_t* d_coeff = NULL;
try
{
// Allocate HOST memory
ALLOCATE_HOST_VECTOR((void**)&h_k, n_vct*sizeof(var_t*));
for (uint32_t i = 0; i < n_vct; i++)
{
ALLOCATE_HOST_VECTOR((void**)(h_k + i), n_arr*sizeof(var_t));
}
ALLOCATE_HOST_VECTOR((void**)&tmp, n_vct*sizeof(var_t*));
ALLOCATE_HOST_VECTOR((void**)&h_a, n_arr*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_a0, n_arr*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_b, n_arr*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_coeff, n_vct*sizeof(var_t));
// Allocate DEVICE memory
ALLOCATE_DEVICE_VECTOR((void**)(&d_k), n_vct*sizeof(var_t*));
for (uint32_t i = 0; i < n_vct; i++)
{
ALLOCATE_DEVICE_VECTOR((void**)(tmp + i), n_arr*sizeof(var_t));
}
CUDA_SAFE_CALL(cudaMemcpy(d_k, tmp, n_vct * sizeof(var_t*), cudaMemcpyHostToDevice));
ALLOCATE_DEVICE_VECTOR((void**)&d_a, n_arr*sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_b, n_arr*sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_coeff, n_vct*sizeof(var_t));
// Populate data
srand(time(NULL));
for (uint32_t i = 0; i < n_vct; i++)
{
for (uint32_t j = 0; j < n_arr; j++)
{
var_t r = (var_t)rand()/RAND_MAX; //returns a pseudo-random integer between 0 and RAND_MAX
*(*(h_k+i)+j) = r;
}
CUDA_SAFE_CALL(cudaMemcpy(tmp[i], h_k[i], n_arr * sizeof(var_t), cudaMemcpyHostToDevice));
}
for (uint32_t j = 0; j < n_arr; j++)
{
h_a[j] = 0;
h_b[j] = 0;
}
for (uint32_t j = 0; j < n_vct; j++)
{
h_coeff[j] = 1;
}
h_coeff[4] = -1;
CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, n_arr * sizeof(var_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_b, h_b, n_arr * sizeof(var_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_coeff, h_coeff, n_vct * sizeof(var_t), cudaMemcpyHostToDevice));
// Test the tools::calc_lin_comb_s() and gpu_calc_lin_comb_s() functions
// Compute a[i] = b[i] + f*c[i]
{
printf("Compute a[i] = b[i] + f*c[i]\n\n");
var_t f = 2.0;
var_t *h_c = *h_k;
tools::calc_lin_comb_s(h_a, h_b, h_c, f, n_arr);
var_t *d_c = *tmp;
gpu_calc_lin_comb_s( d_a, d_b, d_c, f, n_arr, 0, false);
//printf("h_a:\n");
//print_array("", n_arr, h_a, PROC_UNIT_CPU);
//printf("d_a:\n");
//print_array("", n_arr, d_a, PROC_UNIT_GPU);
CUDA_SAFE_CALL(cudaMemcpy(h_a0, d_a, n_arr * sizeof(var_t), cudaMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_arr; j++)
{
if (0 != fabs(h_a[j] - h_a0[j]))
{
printf("Difference: j = %6u : %g\n", j, h_a[j] - h_a0[j]);
}
}
}
// Test the tools::calc_lin_comb_s() and gpu_calc_lin_comb_s() functions
// Compute a[i] = b[i] + sum (coeff[j] * c[j][i])
{
printf("Compute a[i] = b[i] + sum (coeff[j] * c[j][i])\n\n");
tools::calc_lin_comb_s(h_a, h_b, h_k, h_coeff, n_vct, n_arr);
gpu_calc_lin_comb_s( d_a, d_b, d_k, d_coeff, n_vct, n_arr, 0, false);
CUDA_SAFE_CALL(cudaMemcpy(h_a0, d_a, n_arr * sizeof(var_t), cudaMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_arr; j++)
{
if (0 != fabs(h_a[j] - h_a0[j]))
{
printf("Difference: j = %6u : %g\n", j, h_a[j] - h_a0[j]);
}
}
}
// Deallocate memory
for (uint32_t i = 0; i < n_vct; i++)
{
FREE_HOST_VECTOR((void**)(h_k + i));
FREE_DEVICE_VECTOR((void**)(tmp + i));
}
FREE_HOST_VECTOR((void**)&h_k);
FREE_HOST_VECTOR((void**)&tmp);
FREE_DEVICE_VECTOR((void**)&d_k);
FREE_HOST_VECTOR((void**)&h_a);
FREE_HOST_VECTOR((void**)&h_a0);
FREE_HOST_VECTOR((void**)&h_b);
FREE_HOST_VECTOR((void**)&h_coeff);
FREE_DEVICE_VECTOR((void**)&d_a);
FREE_DEVICE_VECTOR((void**)&d_b);
FREE_DEVICE_VECTOR((void**)&d_coeff);
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << std::endl;
}
std::cout << "Compute the linear combination of arrays on the DEVICE and comapre the results those computed on the HOST done.\n";
return 0;
}
#endif
/*
* 2016.11.14. -
* Gravitational interaction computations
*/
#if 0
/*
Premature optimization is the ROOT OF ALL EVIL. Always remember the three rules of optimization!
1. Don't optimize.
2. If you are an expert, see rule #1
3. If you are an expert and can justify the need, then use the following procedure:
- Code it unoptimized
- determine how fast is "Fast enough"--Note which user requirement/story requires that metric.
- Write a speed test
- Test existing code--If it's fast enough, you're done.
- Recode it optimized
- Test optimized code. IF it doesn't meet the metric, throw it away and keep the original.
- If it meets the test, keep the original code in as comments
*/
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <time.h> /* time */
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "type.h"
#include "macro.h"
#include "redutil2.h"
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#include <ctime>
#endif
using namespace redutil2;
// Global variables
uint32_t n_tpb = 128;
uint32_t n_obj = 0;
var_t* h_p = NULL;
var_t* d_p = NULL;
dim3 grid;
dim3 block;
namespace nbody_kernel
{
__global__
void calc_gravity_accel_naive(uint32_t n_obj, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (n_obj > i)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j)
{
continue;
}
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
__global__
void calc_gravity_accel_naive_sym(uint32_t n_obj, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
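// NOTE: the symmetric update writes a[j] for j > i from many threads without atomics,
// so concurrent threads can race on the same element and the result may differ from
// the naive kernel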
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (n_obj > i)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = i+1; j < n_obj; j++)
{
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
s = p[i].mass * d_3;
a[j].x -= s * r_ij.x;
a[j].y -= s * r_ij.y;
a[j].z -= s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
inline __host__ __device__
var3_t body_body_interaction(var3_t riVec, var3_t rjVec, var_t mj, var3_t aiVec)
{
var3_t dVec = {0.0, 0.0, 0.0};
// compute d = r_i - r_j [3 FLOPS] [6 read, 3 write]
dVec.x = rjVec.x - riVec.x;
dVec.y = rjVec.y - riVec.y;
dVec.z = rjVec.z - riVec.z;
// compute norm square of d vector [5 FLOPS] [3 read, 1 write]
var_t r2 = SQR(dVec.x) + SQR(dVec.y) + SQR(dVec.z);
// compute norm of d vector [1 FLOPS] [1 read, 1 write] TODO: how long does it take to compute sqrt ???
var_t r = sqrt(r2);
// compute m_j / d^3 []
var_t s = mj * 1.0 / (r2 * r);
aiVec.x += s * dVec.x;
aiVec.y += s * dVec.y;
aiVec.z += s * dVec.z;
return aiVec;
}
__global__
void calc_gravity_accel_tile(interaction_bound int_bound, int tile_size, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
extern __shared__ var3_t sh_pos[];
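// Tile-based evaluation: the block cooperatively stages tile_size SOURCE positions in
// shared memory, then every thread accumulates the acceleration of its own SINK body
// from the staged tile before the next tile is loaded. Serial sketch (illustration only):
// for (tile = 0; tile*tile_size < n_source; tile++)
//     load tile_size source positions into sh_pos
//     for (j = 0; j < tile_size; j++) acc = body_body_interaction(my_pos, sh_pos[j], m_j, acc)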
var3_t my_pos = {0.0, 0.0, 0.0};
var3_t acc = {0.0, 0.0, 0.0};
// i is the index of the SINK body
const uint32_t i = int_bound.sink.x + blockIdx.x * blockDim.x + threadIdx.x;
// To avoid overrunning the r buffer
if (int_bound.sink.y > i)
{
my_pos = r[i];
}
for (int tile = 0; (tile * tile_size) < int_bound.source.y; tile++)
{
// src_idx is the index of the SOURCE body in the tile
int src_idx = int_bound.source.x + tile * tile_size + threadIdx.x;
// To avoid overrunning the r buffer
if (int_bound.source.y > src_idx)
{
sh_pos[threadIdx.x] = r[src_idx];
}
__syncthreads();
// j is the index of the SOURCE body in the current tile
for (int j = 0; j < blockDim.x; j++)
{
// To avoid overrunning the mass buffer
if (int_bound.source.y <= int_bound.source.x + (tile * tile_size) + j)
{
break;
}
// To avoid self-interaction or mathematically division by zero
if (i != int_bound.source.x + (tile * tile_size)+j)
{
acc = body_body_interaction(my_pos, sh_pos[j], p[src_idx].mass, acc);
}
}
__syncthreads();
}
// To avoid overrunning the a buffer
if (int_bound.sink.y > i)
{
a[i] = acc;
}
}
} /* nbody_kernel */
/*
 * Returns the amount of microseconds elapsed since the UNIX epoch
 * (the name still says "Ms" because it originally returned milliseconds). Works on both
* windows and linux.
*/
uint64_t GetTimeMs64()
{
#ifdef _WIN32
/* Windows */
FILETIME ft;
LARGE_INTEGER li;
/* Get the amount of 100 nano seconds intervals elapsed since January 1, 1601 (UTC) and copy it
* to a LARGE_INTEGER structure. */
GetSystemTimeAsFileTime(&ft);
li.LowPart = ft.dwLowDateTime;
li.HighPart = ft.dwHighDateTime;
uint64_t ret = li.QuadPart;
ret -= 116444736000000000LL; /* Convert from file time to UNIX epoch time. */
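/* 116444736000000000 = 11644473600 s (the 134774 days between 1601-01-01 and 1970-01-01) expressed in 100 ns units */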
//ret /= 10000; /* From 100 nano seconds (10^-7) to 1 millisecond (10^-3) intervals */
ret /= 10; /* From 100 nano seconds (10^-7) to 1 microsecond (10^-6) intervals */
return ret;
#else
/* Linux */
struct timeval tv;
gettimeofday(&tv, NULL);
uint64_t ret = tv.tv_usec;
/* Convert from micro seconds (10^-6) to milliseconds (10^-3) */
//ret /= 1000;
/* Adds the seconds (10^0) after converting them to milliseconds (10^-3) */
//ret += (tv.tv_sec * 1000);
/* Adds the seconds (10^0) after converting them to microseconds (10^-6) */
ret += (tv.tv_sec * 1000000);
return ret;
#endif
}
float gpu_calc_dy(uint32_t n_var, uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy, bool use_symm_prop)
{
set_kernel_launch_param(n_var, n_tpb, grid, block);
printf(" grid: (%4u, %4u, %4u)\n", grid.x, grid.y, grid.z);
printf("block: (%4u, %4u, %4u)\n", block.x, block.y, block.z);
var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
nbp_t::param_t* p = (nbp_t::param_t*)d_p;
cudaEvent_t t0, t1;
CUDA_SAFE_CALL(cudaEventCreate(&t0));
CUDA_SAFE_CALL(cudaEventCreate(&t1));
CUDA_SAFE_CALL(cudaEventRecord(t0));
// Clear the acceleration array: the += op can be used
CUDA_SAFE_CALL(cudaMemset(a, 0, n_obj*sizeof(var3_t)));
// Copy the velocities into dy
// TODO: implement the asynchronous version of cudaMemcpy: Performance ??
CUDA_SAFE_CALL(cudaMemcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t), cudaMemcpyDeviceToDevice));
if (false == use_symm_prop)
{
nbody_kernel::calc_gravity_accel_naive<<<grid, block>>>(n_obj, r, p, a);
}
else
{
nbody_kernel::calc_gravity_accel_naive_sym<<<grid, block>>>(n_obj, r, p, a);
}
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaEventRecord(t1));
CUDA_SAFE_CALL(cudaEventSynchronize(t1));
float dt = 0.0f;
CUDA_SAFE_CALL(cudaEventElapsedTime(&dt, t0, t1));
return dt;
}
float gpu_calc_grav_accel_tile(uint32_t n_var, uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy)
{
set_kernel_launch_param(n_var, n_tpb, grid, block);
printf(" grid: (%4u, %4u, %4u)\n", grid.x, grid.y, grid.z);
printf("block: (%4u, %4u, %4u)\n", block.x, block.y, block.z);
uint2_t sink = {0, n_obj};
uint2_t source = {0, n_obj};
interaction_bound int_bound(sink, source);
var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
nbp_t::param_t* p = (nbp_t::param_t*)d_p;
cudaEvent_t t0, t1;
CUDA_SAFE_CALL(cudaEventCreate(&t0));
CUDA_SAFE_CALL(cudaEventCreate(&t1));
CUDA_SAFE_CALL(cudaEventRecord(t0));
// Clear the acceleration array: the += op can be used
CUDA_SAFE_CALL(cudaMemset(a, 0, n_obj*sizeof(var3_t)));
// Copy the velocities into dy
// TODO: implement the asynchronous version of cudaMemcpy: Performance ??
CUDA_SAFE_CALL(cudaMemcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t), cudaMemcpyDeviceToDevice));
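// Dynamic shared memory: n_tpb var3_t source positions, one per thread of the tile (tile_size == n_tpb)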
nbody_kernel::calc_gravity_accel_tile<<<grid, block, n_tpb * sizeof(var3_t)>>>(int_bound, n_tpb, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaEventRecord(t1, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(t1));
float elapsed_time = 0.0f;
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_time, t0, t1));
return elapsed_time;
}
void cpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy, bool use_symm_prop)
{
// Copy the velocities into dy
memcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t));
var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
// Clear the acceleration array: the += op can be used
memset(a, 0, 3*n_obj*sizeof(var_t));
nbp_t::param_t* p = (nbp_t::param_t*)h_p;
if (use_symm_prop)
{
for (uint32_t i = 0; i < n_obj; i++)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = i+1; j < n_obj; j++)
{
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
s = p[i].mass * d_3;
a[j].x -= s * r_ij.x;
a[j].y -= s * r_ij.y;
a[j].z -= s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
else
{
for (uint32_t i = 0; i < n_obj; i++)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j)
{
continue;
}
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
}
void parse(int argc, const char** argv, uint32_t* n_obj)
{
int i = 1;
if (1 >= argc)
{
throw std::string("Missing command line arguments. For help use -h.");
}
while (i < argc)
{
std::string p = argv[i];
if ( p == "-n")
{
i++;
if (!tools::is_number(argv[i]))
{
throw std::string("Invalid number at: " + p);
}
*n_obj = atoi(argv[i]);
}
else
{
throw std::string("Invalid switch on command line: " + p + ".");
}
i++;
}
}
int main(int argc, const char *argv[])
{
var_t* h_y = NULL;
var_t* h_dy = NULL;
var_t* h_dy0 = NULL;
var_t* d_y = NULL;
var_t* d_dy = NULL;
uint32_t n_var = 0;
uint32_t n_par = 0;
try
{
// n_obj is a global variable
parse(argc, argv, &n_obj);
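// State vector layout: 3*n_obj position components followed by 3*n_obj velocity components;
// one var_t parameter (the mass) is stored per body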
n_var = 6 * n_obj;
n_par = 1 * n_obj;
// Allocate HOST memory
ALLOCATE_HOST_VECTOR((void**)&h_y, n_var * sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_dy, n_var * sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_dy0, n_var * sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&h_p, n_par * sizeof(var_t));
// Allocate DEVICE memory
ALLOCATE_DEVICE_VECTOR((void**)&d_y, n_var * sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_dy, n_var * sizeof(var_t));
ALLOCATE_DEVICE_VECTOR((void**)&d_p, n_par * sizeof(var_t));
// Populate data
srand(time(NULL));
for (uint32_t i = 0; i < n_var; i++)
{
var_t r = (var_t)rand()/RAND_MAX;
*(h_y + i) = r;
}
for (uint32_t i = 0; i < n_par; i++)
{
var_t r = (var_t)rand()/RAND_MAX;
*(h_p + i) = 1;
}
CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, n_var * sizeof(var_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_p, h_p, n_par * sizeof(var_t), cudaMemcpyHostToDevice));
var_t t0 = 0.0;
uint16_t stage = 0;
uint64_t T0 = GetTimeMs64();
cpu_calc_dy(stage, t0, h_y, h_dy, false);
uint64_t T1 = GetTimeMs64();
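// GetTimeMs64() returns microseconds, so dividing by 1000 yields milliseconds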
var_t DT_CPU = ((var_t)(T1 - T0))/1000.0f;
printf("CPU execution time: %16.4e [ms]\n", DT_CPU);
T0 = GetTimeMs64();
cpu_calc_dy(stage, t0, h_y, h_dy0, true);
T1 = GetTimeMs64();
DT_CPU = ((var_t)(T1 - T0))/1000.0f;
printf("CPU execution time: %16.4e [ms]\n", DT_CPU);
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
T0 = GetTimeMs64();
float _DT_GPU = gpu_calc_dy(n_var, stage, t0, d_y, d_dy, false);
T1 = GetTimeMs64();
var_t DT_GPU = ((var_t)(T1 - T0))/1000.0f;
printf("GPU execution time: %16.4e [ms]\n", DT_GPU);
printf("GPU execution time: %16.4e [ms]\n", _DT_GPU);
printf("%10u %16.4e %16.4e %16.4e %16.4e\n", n_obj, DT_CPU, DT_GPU, _DT_GPU, DT_CPU/_DT_GPU);
// Copy down the data from the DEVICE
CUDA_SAFE_CALL(cudaMemcpy(h_dy0, d_dy, n_var * sizeof(var_t), cudaMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
T0 = GetTimeMs64();
_DT_GPU = gpu_calc_dy(n_var, stage, t0, d_y, d_dy, true);
T1 = GetTimeMs64();
DT_GPU = ((var_t)(T1 - T0))/1000.0f;
printf("GPU execution time: %16.4e [ms]\n", DT_GPU);
printf("GPU execution time: %16.4e [ms]\n", _DT_GPU);
printf("%10u %16.4e %16.4e %16.4e %16.4e\n", n_obj, DT_CPU, DT_GPU, _DT_GPU, DT_CPU/_DT_GPU);
// Copy down the data from the DEVICE
CUDA_SAFE_CALL(cudaMemcpy(h_dy0, d_dy, n_var * sizeof(var_t), cudaMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
T0 = GetTimeMs64();
_DT_GPU = gpu_calc_grav_accel_tile(n_var, stage, t0, d_y, d_dy);
T1 = GetTimeMs64();
DT_GPU = ((var_t)(T1 - T0))/1000.0f;
printf("GPU execution time: %16.4e [ms]\n", DT_GPU);
printf("GPU execution time: %16.4e [ms]\n", _DT_GPU);
printf("%10u %16.4e %16.4e %16.4e %16.4e\n", n_obj, DT_CPU, DT_GPU, _DT_GPU, DT_CPU/_DT_GPU);
// Copy down the data from the DEVICE
CUDA_SAFE_CALL(cudaMemcpy(h_dy0, d_dy, n_var * sizeof(var_t), cudaMemcpyDeviceToHost));
for (uint32_t j = 0; j < n_var; j++)
{
if (1.0e-15 < fabs(h_dy[j] - h_dy0[j]))
{
printf("Difference: j = %6u : %24.16e\n", j, h_dy[j] - h_dy0[j]);
}
}
FREE_HOST_VECTOR((void**)&h_y );
FREE_HOST_VECTOR((void**)&h_dy );
FREE_HOST_VECTOR((void**)&h_dy0);
FREE_HOST_VECTOR((void**)&h_p );
FREE_DEVICE_VECTOR((void**)&d_y );
FREE_DEVICE_VECTOR((void**)&d_dy);
FREE_DEVICE_VECTOR((void**)&d_p );
}
catch (const std::string& msg)
{
std::cerr << "Error: " << msg << std::endl;
}
std::cout << "Gravitational interaction computations done.\n";
return 0;
}
#endif
#if 0
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand, malloc */
#include <time.h> /* time */
#include <iostream>
#include <string>
#include "constants.h"
#include "type.h"
#include "redutil2.h"
using namespace std;
using namespace redutil2;
int comp_value(var_t v1, var_t v2, var_t tol, char* lpad, char* text)
{
int result = 0;
var_t d = fabs(v1 - v2);
if (tol < d)
{
printf("%s%s = %25.15lg\n", lpad, text, d);
result = 1;
}
return result;
}
int comp_oe(orbelem_t &oe1, orbelem_t& oe2, var_t tol, char* lpad)
{
int result = comp_value(oe1.sma, oe2.sma, tol, lpad, "Abs(Delta(sma ))");
result += comp_value(oe1.ecc, oe2.ecc, tol, lpad, "Abs(Delta(ecc ))");
result += comp_value(oe1.inc, oe2.inc, tol, lpad, "Abs(Delta(inc ))");
result += comp_value(oe1.peri, oe2.peri, tol, lpad, "Abs(Delta(peri))");
result += comp_value(oe1.node, oe2.node, tol, lpad, "Abs(Delta(node))");
result += comp_value(oe1.mean, oe2.mean, tol, lpad, "Abs(Delta(mean))");
return result;
}
int comp_2D_vectors(var2_t &v1, var2_t &v2, var_t tol, char* lpad)
{
int result = comp_value(v1.x, v2.x, tol, lpad, "Abs(Delta(v1.x - v2.x))");
result += comp_value(v1.y, v2.y, tol, lpad, "Abs(Delta(v1.y - v2.y))");
return result;
}
var_t random(var_t x0, var_t x1)
{
return (x0 + ((var_t)rand() / RAND_MAX) * (x1 - x0));
}
void test_calc_ephemeris()
{
// Test calculate phase from orbital elements and vice versa
{
const char func_name[] = "calc_phase";
char lpad[] = " ";
/*
* The units are:
* Unit name | Unit symbol | Quantity name
* -----------------------------------------------
* Astronomical unit | AU | length
* Solar mass | S | mass
* Mean solar day | D | time
*/
srand((unsigned int)time(NULL));
// parameter of the problem
tbp_t::param_t p;
// Set the parameter of the problem
p.mu = constants::Gauss2 * (1.0 + 1.0);
// Set the initial orbital elements
orbelem_t oe1 = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
orbelem_t oe2 = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
var3_t r0 = {0, 0, 0};
var3_t v0 = {0, 0, 0};
var_t tol = 1.0e-14;
for (int i = 0; i < 100; i++)
{
oe1.sma = random(0.1, 10.0);
oe1.ecc = random(0.0, 0.8);
oe1.inc = random(0.0, PI);
oe1.peri =random(0.0, TWOPI);
oe1.node =random(0.0, TWOPI);
oe1.mean =random(0.0, TWOPI);
// Calculate the position and velocity vectors from orbital elements
tools::calc_phase(p.mu, &oe1, &r0, &v0);
// Calculate the orbital elements from position and velocity vectors
tools::calc_oe(p.mu, &r0, &v0, &oe2);
int ret_val = comp_oe(oe1, oe2, tol, lpad);
if (0 < ret_val)
{
printf(" TEST '%s' failed with tolerance level: %g\n", func_name, tol);
}
else
{
printf(" TEST '%s' passed with tolerance level: %g\n", func_name, tol);
}
}
} /* Test calc_phase() and calc_oe() functions */
}
void test_rtbp2d_calc_energy()
{
// Test tools::tbp::calc_integral() and tools::rtbp2D::calc_integral() functions
{
const char func_name[] = "tools::tbp::calc_integral";
char lpad[] = " ";
/*
* The units are:
* Unit name | Unit symbol | Quantity name
* -----------------------------------------------
* Astronomical unit | AU | length
* Solar mass | S | mass
* Mean solar day | D | time
*/
srand(0);
orbelem_t oe = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
var3_t r0 = {0, 0, 0};
var3_t v0 = {0, 0, 0};
var_t mu = constants::Gauss2*(1.0 + 1.0);
var_t tol = 1.0e-15;
for (int i = 0; i < 10; i++)
{
// Set the initial orbital elements
oe.sma = random(0.1, 10.0);
oe.ecc = random(0.0, 0.8);
oe.inc = 0.0;
oe.peri = random(0.0, TWOPI);
oe.node = 0.0;
oe.mean = random(0.0, TWOPI);
// Calculate the position and velocity vectors from orbital elements
tools::calc_phase(mu, &oe, &r0, &v0);
// Set the starting coordinate and velocity vectors
var2_t r = {r0.x, r0.y};
var2_t v = {v0.x, v0.y};
var2_t u = {0, 0};
var2_t up = {0, 0};
tools::rtbp2D::transform_x2u(r, u);
tools::rtbp2D::transform_xd2up(u, v, up);
var_t hs = tools::tbp::calc_integral(mu, r, v);
var_t hr = tools::rtbp2D::calc_integral(mu, u, up);
printf(" hs = %25.15le\n", hs);
printf(" hr = %25.15le\n", hr);
}
// Calculate the energy along a Kepler-orbit
oe.sma = 1.5;
oe.ecc = 0.8;
oe.inc = 0.0;
oe.peri = 0.0;
oe.node = 0.0;
oe.mean = 0.0;
do
{
tools::calc_phase(mu, &oe, &r0, &v0);
var2_t r = {r0.x, r0.y};
var2_t v = {v0.x, v0.y};
var2_t u = {0, 0};
var2_t up = {0, 0};
tools::rtbp2D::transform_x2u(r, u);
tools::rtbp2D::transform_xd2up(u, v, up);
var_t hs = tools::tbp::calc_integral(mu, r, v);
var_t hr = tools::rtbp2D::calc_integral(mu, u, up);
printf("%25.15le %25.15le %25.15le\n", oe.mean, hs, hr);
oe.mean += 1.0 * constants::DegreeToRadian;
} while (oe.mean <= TWOPI);
} /* Test tools::rtbp2D::transform_x2u() and tools::rtbp2D::transform_u2x() functions */
}
void test_rtbp2d_transform()
{
// Test square (section lines)
{
var_t d = 0.01;
// Q4 -> Q1
var2_t x = {0.5, -0.5};
var2_t u = {0, 0};
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.y += d;
} while (0.5 >= x.y);
// Q1 -> Q2
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.x -= d;
} while (-0.5 <= x.x);
// Q2 -> Q3
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.y -= d;
} while (-0.5 <= x.y);
// Q3 -> Q4
do
{
tools::rtbp2D::transform_x2u(x, u);
printf("%23.15le %23.15le %23.15le %23.15le\n", x.x, x.y, u.x, u.y);
x.x += d;
} while (0.5 >= x.x);
}
return;
// Test ellipse
{
const char func_name[] = "tools::rtbp2D::transform___";
char lpad[] = " ";
/*
* The units are:
* Unit name | Unit symbol | Quantity name
* -----------------------------------------------
* Astronomical unit | AU | length
* Solar mass | S | mass
* Mean solar day | D | time
*/
srand(0);
const var_t mu = constants::Gauss2*(1.0 + 1.0);
orbelem_t oe = {0.5, 0.8, 0.0, 0.0, 0.0, 0.0};
var3_t r0 = {0, 0, 0};
var3_t v0 = {0, 0, 0};
int i = 0;
do
{
oe.mean = i * constants::DegreeToRadian;
tools::calc_phase(mu, &oe, &r0, &v0);
var2_t x = {r0.x, r0.y};
var2_t xd = {v0.x, v0.y};
var2_t u = {0, 0};
var2_t up = {0, 0};
tools::rtbp2D::transform_x2u(x, u);
tools::rtbp2D::transform_xd2up(u, xd, up);
x.x = x.y = 0.0;
xd.x = xd.y = 0.0;
tools::rtbp2D::transform_u2x(u, x);
tools::rtbp2D::transform_up2xd(u, up, xd);
// Compare the original position and velocity vectors with the calculated ones
{
var_t tol = 1.0e-15;
var2_t x0 = {r0.x, r0.y};
var2_t x0d = {v0.x, v0.y};
comp_2D_vectors(x0, x, tol, lpad);
comp_2D_vectors(x0d, xd, tol, lpad);
}
printf("%23.15le %23.15le %23.15le %23.15le %23.15le %23.15le %23.15le %23.15le %23.15le\n", oe.mean, x.x, x.y, u.x, u.y, xd.x, xd.y, up.x, up.y);
if (0 < i && 0 == (i+1) % 90)
{
printf("\n");
}
i++;
} while (360 > i);
} /* Test tools::rtbp2D::transform_x2u() and tools::rtbp2D::transform_u2x() functions */
}
void test_calc_lin_comb()
{
// Test calculate linear combination of vectors
{
const char func_name[] = "calc_lin_comb";
char lpad[] = " ";
uint32_t n_var = 2;
uint16_t n_vct = 3;
var_t* a = NULL;
var_t* b = NULL;
var_t** c = NULL;
var_t* coeff = NULL;
ALLOCATE_HOST_VECTOR((void**)&(a), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&(b), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&c, n_vct*sizeof(var_t*));
for (uint16_t i = 0; i < n_vct; i++)
{
ALLOCATE_HOST_VECTOR((void**)&(c[i]), n_var*sizeof(var_t));
}
ALLOCATE_HOST_VECTOR((void**)&coeff, n_vct*sizeof(var_t));
// Populate vectors
memset(a, 0, n_var*sizeof(var_t));
for (int i = 0; i < n_var; i++)
{
b[i] = -(i+1);
}
for (uint32_t i = 0; i < n_vct; i++)
{
for (uint32_t j = 0; j < n_var; j++)
{
c[i][j] = i+j+1;
}
}
for (int i = 0; i < n_vct; i++)
{
coeff[i] = 10*i;
}
printf("The data in the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
printf("b:\n");
print_array("", n_var, b, PROC_UNIT_CPU);
for (uint32_t i = 0; i < n_vct; i++)
{
printf("c[%d]:\n", i);
print_array("", n_var, c[i], PROC_UNIT_CPU);
}
printf("The coefficients:\n");
print_array("", n_vct, coeff, PROC_UNIT_CPU);
// Calculate the linear combination of the vectors
tools::calc_lin_comb(a, c, coeff, n_vct, n_var);
printf("The linear combination of the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
// Calculate the special case of linear combination of the vectors
tools::calc_lin_comb_s(a, b, c, coeff, n_vct, n_var);
printf("The special linear combination of the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
FREE_HOST_VECTOR((void **)&(coeff));
for (uint16_t i = 0; i < n_vct; i++)
{
FREE_HOST_VECTOR((void **)&(c[i]));
}
FREE_HOST_VECTOR((void **)&(c));
FREE_HOST_VECTOR((void **)&(b));
FREE_HOST_VECTOR((void **)&(a));
}
// Test calculate linear combination of two vectors
{
const char func_name[] = "calc_lin_comb_s";
char lpad[] = " ";
uint32_t n_var = 2;
var_t* a = NULL;
var_t* b = NULL;
var_t* c = NULL;
var_t f = 3;
ALLOCATE_HOST_VECTOR((void**)&(a), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&(b), n_var*sizeof(var_t));
ALLOCATE_HOST_VECTOR((void**)&(c), n_var*sizeof(var_t));
// Populate vectors
memset(a, 0, n_var*sizeof(var_t));
for (int i = 0; i < n_var; i++)
{
b[i] = -(i+1);
c[i] = i+1;
}
printf("The data in the vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
printf("b:\n");
print_array("", n_var, b, PROC_UNIT_CPU);
printf("c:\n");
print_array("", n_var, c, PROC_UNIT_CPU);
printf("The coefficient:\n");
printf("%5e\n", f);
// Calculate the special case of linear combination of the vectors
tools::calc_lin_comb_s(a, b, c, f, n_var);
printf("The special linear combination of two vectors:\n");
printf("a:\n");
print_array("", n_var, a, PROC_UNIT_CPU);
FREE_HOST_VECTOR((void **)&(c));
FREE_HOST_VECTOR((void **)&(b));
FREE_HOST_VECTOR((void **)&(a));
}
}
/*
cd 'C:\Work\red.cuda.Results\v2.0\Test_Copy\rtbp2D\Test_transform'
a=1.0
p [-1:1][-1:1]'e_0.0_q1.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.0_q2.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.0_q3.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.0_q4.txt' u 2:3 w l, '' u 4:5 w l
a=0.05
p [-a:a][-a:a]'e_0.0_q1.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.0_q2.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.0_q3.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.0_q4.txt' u 6:7 w l, '' u 8:9 w l
a=1.0
p [-1:1][-1:1]'e_0.2_q1.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.2_q2.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.2_q3.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.2_q4.txt' u 2:3 w l, '' u 4:5 w l
a=0.05
p [-a:a][-a:a]'e_0.2_q1.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.2_q2.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.2_q3.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.2_q4.txt' u 6:7 w l, '' u 8:9 w l
a=1.0
p [-1:1][-1:1]'e_0.8_q1.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.8_q2.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.8_q3.txt' u 2:3 w l, '' u 4:5 w l, 'e_0.8_q4.txt' u 2:3 w l, '' u 4:5 w l
a=0.05
p [-a:a][-a:a]'e_0.8_q1.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.8_q2.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.8_q3.txt' u 6:7 w l, '' u 8:9 w l, 'e_0.8_q4.txt' u 6:7 w l, '' u 8:9 w l
*/
int main()
{
try
{
//test_calc_ephemeris();
//test_rtbp2d_trans();
//test_rtbp2d_transform();
//test_rtbp2d_calc_energy();
test_calc_lin_comb();
}
catch (const string& msg)
{
cerr << "Error: " << msg << endl;
}
return 0;
}
#endif
|
fd77d38750e5cca0c831a95586ad5ec4ebcef09e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int*a, int*b, int*c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
|
fd77d38750e5cca0c831a95586ad5ec4ebcef09e.cu
|
#include "includes.h"
__global__ void add(int*a, int*b, int*c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
|
98da5188c81246bddd7a585fdcff932262949513.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "LowPassColMulti.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Result = NULL;
hipMalloc(&d_Result, XSIZE*YSIZE);
float *d_Data = NULL;
hipMalloc(&d_Data, XSIZE*YSIZE);
int width = XSIZE;
int pitch = 2;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((LowPassColMulti), dim3(gridBlock), dim3(threadBlock), 0, 0, d_Result, d_Data, width, pitch, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((LowPassColMulti), dim3(gridBlock), dim3(threadBlock), 0, 0, d_Result, d_Data, width, pitch, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((LowPassColMulti), dim3(gridBlock), dim3(threadBlock), 0, 0, d_Result, d_Data, width, pitch, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
98da5188c81246bddd7a585fdcff932262949513.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "LowPassColMulti.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Result = NULL;
cudaMalloc(&d_Result, XSIZE*YSIZE);
float *d_Data = NULL;
cudaMalloc(&d_Data, XSIZE*YSIZE);
int width = XSIZE;
int pitch = 2;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
LowPassColMulti<<<gridBlock,threadBlock>>>(d_Result,d_Data,width,pitch,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
LowPassColMulti<<<gridBlock,threadBlock>>>(d_Result,d_Data,width,pitch,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
LowPassColMulti<<<gridBlock,threadBlock>>>(d_Result,d_Data,width,pitch,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
06f1a3ca78df603a626fabcb58ed0ad65cc6e8e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../../../src/image_buffer.h"
#include "../../../src/image_exception.h"
#include "../../../src/image_function.h"
#include "../../../src/cuda/cuda_types.cuh"
#include "../../../src/cuda/cuda_helper.cuh"
#include "../../../src/cuda/image_buffer_cuda.cuh"
#include "../unit_test_helper.h"
#include "unit_test_helper_cuda.cuh"
namespace
{
__global__ void isEqualCuda( const uint8_t * image, uint8_t value, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * differenceCount )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t id = y * rowSize + x;
if ( image[id] == value )
atomicAdd( differenceCount, 1 );
}
}
__global__ void isAnyEqualCuda( const uint8_t * image, uint8_t * value, size_t valueCount, uint32_t width, uint32_t height,
uint32_t * differenceCount )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t id = y * width + x;
bool equal = false;
for ( uint32_t i = 0; i < valueCount; ++i ) {
if ( image[id] == value[i] ) {
equal = true;
break;
}
}
if ( equal )
atomicAdd( differenceCount, 1 );
}
}
}
namespace Unit_Test
{
namespace Cuda
{
bool verifyImage( const PenguinV_Image::Image & image, uint8_t value )
{
return verifyImage( image, 0, 0, image.width(), image.height(), value );
}
bool verifyImage( const PenguinV_Image::Image & image, const std::vector < uint8_t > & value )
{
multiCuda::Type<uint32_t> differenceCount( 0 );
multiCuda::Array<uint8_t> valueCuda( value );
const uint32_t rowSize = image.rowSize();
const uint32_t height = image.height();
launchKernel2D( isAnyEqualCuda, rowSize, height,
image.data(), valueCuda.data(), valueCuda.size(), rowSize, height, differenceCount.data() );
return differenceCount.get() == rowSize * height;
}
bool verifyImage( const PenguinV_Image::Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t value )
{
multiCuda::Type<uint32_t> differenceCount( 0 );
const uint8_t colorCount = image.colorCount();
width = width * colorCount;
const uint32_t rowSize = image.rowSize();
const uint8_t * data = image.data() + y * rowSize + x * colorCount;
launchKernel2D( isEqualCuda, width, height,
data, value, rowSize, width, height, differenceCount.data() );
return differenceCount.get() == width * height;
}
}
}
|
06f1a3ca78df603a626fabcb58ed0ad65cc6e8e7.cu
|
#include <cuda_runtime.h>
#include "../../../src/image_buffer.h"
#include "../../../src/image_exception.h"
#include "../../../src/image_function.h"
#include "../../../src/cuda/cuda_types.cuh"
#include "../../../src/cuda/cuda_helper.cuh"
#include "../../../src/cuda/image_buffer_cuda.cuh"
#include "../unit_test_helper.h"
#include "unit_test_helper_cuda.cuh"
namespace
{
__global__ void isEqualCuda( const uint8_t * image, uint8_t value, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * differenceCount )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t id = y * rowSize + x;
if ( image[id] == value )
atomicAdd( differenceCount, 1 );
}
}
__global__ void isAnyEqualCuda( const uint8_t * image, uint8_t * value, size_t valueCount, uint32_t width, uint32_t height,
uint32_t * differenceCount )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t id = y * width + x;
bool equal = false;
for ( uint32_t i = 0; i < valueCount; ++i ) {
if ( image[id] == value[i] ) {
equal = true;
break;
}
}
if ( equal )
atomicAdd( differenceCount, 1 );
}
}
}
namespace Unit_Test
{
namespace Cuda
{
bool verifyImage( const PenguinV_Image::Image & image, uint8_t value )
{
return verifyImage( image, 0, 0, image.width(), image.height(), value );
}
bool verifyImage( const PenguinV_Image::Image & image, const std::vector < uint8_t > & value )
{
multiCuda::Type<uint32_t> differenceCount( 0 );
multiCuda::Array<uint8_t> valueCuda( value );
const uint32_t rowSize = image.rowSize();
const uint32_t height = image.height();
launchKernel2D( isAnyEqualCuda, rowSize, height,
image.data(), valueCuda.data(), valueCuda.size(), rowSize, height, differenceCount.data() );
return differenceCount.get() == rowSize * height;
}
bool verifyImage( const PenguinV_Image::Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t value )
{
multiCuda::Type<uint32_t> differenceCount( 0 );
const uint8_t colorCount = image.colorCount();
width = width * colorCount;
const uint32_t rowSize = image.rowSize();
const uint8_t * data = image.data() + y * rowSize + x * colorCount;
launchKernel2D( isEqualCuda, width, height,
data, value, rowSize, width, height, differenceCount.data() );
return differenceCount.get() == width * height;
}
}
}
|
4e01762c70485063e8b5df31454072f3244695aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "GPUManager/GPUCustomTypes.h"
#include "GPUManager/CUDA-Complex.cuh"
__global__ void
GPUComplexCoeff_kernel(GPU_AMP_PROTO, GDouble param1, GDouble param2, bool represReIm)
{
if(represReIm) {
WCUComplex ans = { param1, param2 };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
else {
WCUComplex ans = { param1*cos(param2), param1*sin(param2) };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
}
void
GPUComplexCoeff_exec(dim3 dimGrid, dim3 dimBlock, GPU_AMP_PROTO, GDouble m_param1, GDouble m_param2, bool m_represReIm)
{
hipLaunchKernelGGL(( GPUComplexCoeff_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, GPU_AMP_ARGS, m_param1, m_param2, m_represReIm);
}
|
4e01762c70485063e8b5df31454072f3244695aa.cu
|
#include <stdio.h>
#include "GPUManager/GPUCustomTypes.h"
#include "GPUManager/CUDA-Complex.cuh"
__global__ void
GPUComplexCoeff_kernel(GPU_AMP_PROTO, GDouble param1, GDouble param2, bool represReIm)
{
if(represReIm) {
WCUComplex ans = { param1, param2 };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
else {
WCUComplex ans = { param1*cos(param2), param1*sin(param2) };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
}
void
GPUComplexCoeff_exec(dim3 dimGrid, dim3 dimBlock, GPU_AMP_PROTO, GDouble m_param1, GDouble m_param2, bool m_represReIm)
{
GPUComplexCoeff_kernel<<< dimGrid, dimBlock >>>(GPU_AMP_ARGS, m_param1, m_param2, m_represReIm);
}
|
d9285c76cb23c3f75ec66349d64dd9f598fca74f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Output of "python3 circuit2.py sum.crc" is pasted in as the kernel
// along with some boilerplate code to test this out.
// Expected output: "28 1 5 3 22 5 13 7\n".
#include <iostream>
__global__ void sum(int *x0, int *x1){
int tid = threadIdx.x;
x1[tid] = x0[tid];
switch(tid){
case 0:
x1[0] += x0[1];
break;
case 2:
x1[2] += x0[3];
break;
case 4:
x1[4] += x0[5];
break;
case 6:
x1[6] += x0[7];
break;
}
__syncthreads();
x0[tid] = x1[tid];
switch(tid){
case 0:
x0[0] += x1[2];
break;
case 4:
x0[4] += x1[6];
break;
}
__syncthreads();
x1[tid] = x0[tid];
switch(tid){
case 0:
x1[0] += x0[4];
break;
}
__syncthreads();
x0[tid] = x1[tid];
switch(tid){
}
}
int main(){
int *x;
x = (int *) malloc(8 * sizeof(int));
for(int i = 0; i < 8; i++){
x[i] = i;
}
int *d_x0, *d_x1;
hipMalloc(&d_x0, 8 * sizeof(int));
hipMalloc(&d_x1, 8 * sizeof(int));
hipMemcpy(d_x0, x, 8 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum), dim3(1), dim3(8), 0, 0, d_x0, d_x1);
hipDeviceSynchronize();
hipMemcpy(x, d_x0, 8 * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 8; i++){
std::cout << x[i] << ' ';
}
std::cout << '\n';
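// Cleanup sketch added for completeness (not in the original): release the
// device buffers allocated with hipMalloc and the host buffer from malloc.
hipFree(d_x0);
hipFree(d_x1);
free(x);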
}
|
d9285c76cb23c3f75ec66349d64dd9f598fca74f.cu
|
// Output of "python3 circuit2.py sum.crc" is pasted in as the kernel
// along with some boilerplate code to test this out.
// Expected output: "28 1 5 3 22 5 13 7\n".
#include <iostream>
__global__ void sum(int *x0, int *x1){
int tid = threadIdx.x;
x1[tid] = x0[tid];
switch(tid){
case 0:
x1[0] += x0[1];
break;
case 2:
x1[2] += x0[3];
break;
case 4:
x1[4] += x0[5];
break;
case 6:
x1[6] += x0[7];
break;
}
__syncthreads();
x0[tid] = x1[tid];
switch(tid){
case 0:
x0[0] += x1[2];
break;
case 4:
x0[4] += x1[6];
break;
}
__syncthreads();
x1[tid] = x0[tid];
switch(tid){
case 0:
x1[0] += x0[4];
break;
}
__syncthreads();
x0[tid] = x1[tid];
switch(tid){
}
}
int main(){
int *x;
x = (int *) malloc(8 * sizeof(int));
for(int i = 0; i < 8; i++){
x[i] = i;
}
int *d_x0, *d_x1;
cudaMalloc(&d_x0, 8 * sizeof(int));
cudaMalloc(&d_x1, 8 * sizeof(int));
cudaMemcpy(d_x0, x, 8 * sizeof(int), cudaMemcpyHostToDevice);
sum<<<1, 8>>>(d_x0, d_x1);
cudaDeviceSynchronize();
cudaMemcpy(x, d_x0, 8 * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 8; i++){
std::cout << x[i] << ' ';
}
std::cout << '\n';
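// Cleanup sketch added for completeness (not in the original): release the
// device buffers allocated with cudaMalloc and the host buffer from malloc.
cudaFree(d_x0);
cudaFree(d_x1);
free(x);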
}
|
1c5a3304f979efd63bc338194e1f59961b52c416.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Matmul::map(void)
{
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
helperSetTensorDescriptor(outputs[0], outputTensor);
if (activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (activation) {
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
}
// allocate tensors
size_t outputSize = sizeof(DATATYPE) * outputs[0].volume();
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
}
void Matmul::unmap(void)
{
checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor));
if (activation != AC_MODE_NONE) {
checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
}
checkCUDA(hipFree(outputs[0].data_ptr));
}
void Matmul::forward(bool block)
{
const float alpha = 1.0f;
const float beta = 0.0f;
int numDim = outputs[0].numDim;
int m = inputs[0].dim[numDim-2];
int n = inputs[1].dim[numDim-1];
int k = inputs[0].dim[numDim-1];
hipblasOperation_t transA, transB;
int lda, ldb, ldc;
if (inputs[0].stride[numDim-2] == 1) {
transA = HIPBLAS_OP_N;
lda = inputs[0].stride[numDim-1];
} else {
assert(inputs[0].stride[numDim-1] == 1);
transA = HIPBLAS_OP_T;
lda = inputs[0].stride[numDim-2];
}
if (inputs[1].stride[numDim-2] == 1) {
transB = HIPBLAS_OP_N;
ldb = inputs[1].stride[numDim-1];
} else {
assert(inputs[1].stride[numDim-1] == 1);
transB = HIPBLAS_OP_T;
ldb = inputs[1].stride[numDim-2];
}
ldc = outputs[0].stride[numDim-1];
if (numDim == 2) {
// Normal 2D Matmul
checkCUDA(hipblasSgemm(model->blas, transA, transB,
m, n, k, &alpha, (float*)inputs[0].data_ptr, lda,
(float*)inputs[1].data_ptr, ldb, &beta, (float*)outputs[0].data_ptr, ldc));
} else {
// Batched Matmul
int strideA = inputs[0].stride[numDim-3];
int strideB = inputs[1].stride[numDim-3];
int strideC = outputs[0].stride[numDim-3];
int batch = 1;
for (int i = 0; i < numDim-2; i++)
batch *= outputs[0].dim[i];
checkCUDA(hipblasSgemmStridedBatched(model->blas, transA, transB,
m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, strideA,
(float*)inputs[1].data_ptr, ldb, strideB,
&beta, (float*)outputs[0].data_ptr, ldc, strideC, batch));
}
if (activation != AC_MODE_NONE)
checkCUDNN(cudnnActivationForward(model->dnn, actiDesc,
&alpha, outputTensor, outputs[0].data_ptr,
&beta, outputTensor, outputs[0].data_ptr));
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_matmul_cost(Matmul* mm)
{
const float alpha = 1.0f;
const float beta = 0.0f;
int numDim = mm->outputs[0].numDim;
int m = mm->inputs[0].dim[numDim-2];
int n = mm->inputs[1].dim[numDim-1];
int k = mm->inputs[0].dim[numDim-1];
hipblasOperation_t transA, transB;
int lda, ldb, ldc;
if (mm->inputs[0].stride[numDim-2] == 1) {
transA = HIPBLAS_OP_N;
lda = mm->inputs[0].stride[numDim-1];
} else {
assert(mm->inputs[0].stride[numDim-1] == 1);
transA = HIPBLAS_OP_T;
lda = mm->inputs[0].stride[numDim-2];
}
if (mm->inputs[1].stride[numDim-2] == 1) {
transB = HIPBLAS_OP_N;
ldb = mm->inputs[1].stride[numDim-1];
} else {
assert(mm->inputs[1].stride[numDim-1] == 1);
transB = HIPBLAS_OP_T;
ldb = mm->inputs[1].stride[numDim-2];
}
ldc = mm->outputs[0].stride[numDim-1];
if (mm->activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (mm->activation) {
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
}
helperSetTensorDescriptor(mm->outputs[0], outputTensor);
checkCUDA(hipDeviceSynchronize());
for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) {
if (i == WARMUP_TIMES)
checkCUDA(hipEventRecord(startEvent));
if (numDim == 2) {
// Normal 2D Matmul
checkCUDA(hipblasSgemm(blas, transA, transB,
m, n, k, &alpha, inputPtr, lda,
filterPtr, ldb, &beta, outputPtr, ldc));
} else {
// Batched Matmul
int strideA = mm->inputs[0].stride[numDim-3];
int strideB = mm->inputs[1].stride[numDim-3];
int strideC = mm->outputs[0].stride[numDim-3];
int batch = 1;
for (int i = 0; i < numDim-2; i++)
batch *= mm->outputs[0].dim[i];
checkCUDA(hipblasSgemmStridedBatched(blas, transA, transB,
m, n, k, &alpha, inputPtr, lda, strideA,
filterPtr, ldb, strideB,
&beta, outputPtr, ldc, strideC, batch));
}
if (mm->activation != AC_MODE_NONE)
checkCUDNN(cudnnActivationForward(dnn, actiDesc,
&alpha, outputTensor, outputPtr,
&beta, outputTensor, outputPtr));
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
mm->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Matmul]: %s %s acti(%d) cost(%.4lf)\n",
mm->inputs[0].to_string("input").c_str(),
mm->inputs[1].to_string("weight").c_str(),
mm->activation, mm->runtime);
}
|
1c5a3304f979efd63bc338194e1f59961b52c416.cu
|
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Matmul::map(void)
{
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
helperSetTensorDescriptor(outputs[0], outputTensor);
if (activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (activation) {
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
}
// allocate tensors
size_t outputSize = sizeof(DATATYPE) * outputs[0].volume();
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
}
void Matmul::unmap(void)
{
checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor));
if (activation != AC_MODE_NONE) {
checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
}
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void Matmul::forward(bool block)
{
const float alpha = 1.0f;
const float beta = 0.0f;
int numDim = outputs[0].numDim;
int m = inputs[0].dim[numDim-2];
int n = inputs[1].dim[numDim-1];
int k = inputs[0].dim[numDim-1];
cublasOperation_t transA, transB;
int lda, ldb, ldc;
if (inputs[0].stride[numDim-2] == 1) {
transA = CUBLAS_OP_N;
lda = inputs[0].stride[numDim-1];
} else {
assert(inputs[0].stride[numDim-1] == 1);
transA = CUBLAS_OP_T;
lda = inputs[0].stride[numDim-2];
}
if (inputs[1].stride[numDim-2] == 1) {
transB = CUBLAS_OP_N;
ldb = inputs[1].stride[numDim-1];
} else {
assert(inputs[1].stride[numDim-1] == 1);
transB = CUBLAS_OP_T;
ldb = inputs[1].stride[numDim-2];
}
ldc = outputs[0].stride[numDim-1];
if (numDim == 2) {
// Normal 2D Matmul
checkCUDA(cublasSgemm(model->blas, transA, transB,
m, n, k, &alpha, (float*)inputs[0].data_ptr, lda,
(float*)inputs[1].data_ptr, ldb, &beta, (float*)outputs[0].data_ptr, ldc));
} else {
// Batched Matmul
int strideA = inputs[0].stride[numDim-3];
int strideB = inputs[1].stride[numDim-3];
int strideC = outputs[0].stride[numDim-3];
int batch = 1;
for (int i = 0; i < numDim-2; i++)
batch *= outputs[0].dim[i];
checkCUDA(cublasSgemmStridedBatched(model->blas, transA, transB,
m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, strideA,
(float*)inputs[1].data_ptr, ldb, strideB,
&beta, (float*)outputs[0].data_ptr, ldc, strideC, batch));
}
if (activation != AC_MODE_NONE)
checkCUDNN(cudnnActivationForward(model->dnn, actiDesc,
&alpha, outputTensor, outputs[0].data_ptr,
&beta, outputTensor, outputs[0].data_ptr));
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_matmul_cost(Matmul* mm)
{
const float alpha = 1.0f;
const float beta = 0.0f;
int numDim = mm->outputs[0].numDim;
int m = mm->inputs[0].dim[numDim-2];
int n = mm->inputs[1].dim[numDim-1];
int k = mm->inputs[0].dim[numDim-1];
cublasOperation_t transA, transB;
int lda, ldb, ldc;
if (mm->inputs[0].stride[numDim-2] == 1) {
transA = CUBLAS_OP_N;
lda = mm->inputs[0].stride[numDim-1];
} else {
assert(mm->inputs[0].stride[numDim-1] == 1);
transA = CUBLAS_OP_T;
lda = mm->inputs[0].stride[numDim-2];
}
if (mm->inputs[1].stride[numDim-2] == 1) {
transB = CUBLAS_OP_N;
ldb = mm->inputs[1].stride[numDim-1];
} else {
assert(mm->inputs[1].stride[numDim-1] == 1);
transB = CUBLAS_OP_T;
ldb = mm->inputs[1].stride[numDim-2];
}
ldc = mm->outputs[0].stride[numDim-1];
if (mm->activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (mm->activation) {
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
}
helperSetTensorDescriptor(mm->outputs[0], outputTensor);
checkCUDA(cudaDeviceSynchronize());
for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) {
if (i == WARMUP_TIMES)
checkCUDA(cudaEventRecord(startEvent));
if (numDim == 2) {
// Normal 2D Matmul
checkCUDA(cublasSgemm(blas, transA, transB,
m, n, k, &alpha, inputPtr, lda,
filterPtr, ldb, &beta, outputPtr, ldc));
} else {
// Batched Matmul
int strideA = mm->inputs[0].stride[numDim-3];
int strideB = mm->inputs[1].stride[numDim-3];
int strideC = mm->outputs[0].stride[numDim-3];
int batch = 1;
for (int i = 0; i < numDim-2; i++)
batch *= mm->outputs[0].dim[i];
checkCUDA(cublasSgemmStridedBatched(blas, transA, transB,
m, n, k, &alpha, inputPtr, lda, strideA,
filterPtr, ldb, strideB,
&beta, outputPtr, ldc, strideC, batch));
}
if (mm->activation != AC_MODE_NONE)
checkCUDNN(cudnnActivationForward(dnn, actiDesc,
&alpha, outputTensor, outputPtr,
&beta, outputTensor, outputPtr));
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
mm->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Matmul]: %s %s acti(%d) cost(%.4lf)\n",
mm->inputs[0].to_string("input").c_str(),
mm->inputs[1].to_string("weight").c_str(),
mm->activation, mm->runtime);
}
|
ee3d89c19bf8cd0cba4de54816e109e0409f3e59.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
Name: GPU_PHASE1.cu
Desc: This file contains GPU kernels for building a kd-tree
The kd-nodes are stored in a left balanced layout
Notes:
Kd-tree attributes
static -- we need to know all points "a priori" before building the kd-tree
balanced -- Tree has maximum height of O( log<2> n )
Left-Balanced tree array layout
-- The kd-nodes in the kd-tree are stored in a left-balanced tree layout
-- Given n points, We allocate n+1 nodes
-- The kd-node at index zero is ignored (wasted space)
-- The Root kd-node is always found at index 1
-- Given any node at position 'i'
-- The parent node is found at 'i/2'
-- The left child node is found at '2*i'
-- The right child node is found at '2*i+1'
d-Dimensionality -- 2D, 3D, 4D, ...
cyclical -- we follow a cyclical pattern in switching between axes
at each level of the tree,
for 2D <x,y,x,y,x,y,...>
for 3D <x,y,z,x,y,z,...>
for 4D <x,y,z,w,x,y,z,w,...>
for 6D <x,y,z,w,s,t,x,y,z,w,s,t,...>
etc.
Point Storage -- 1 search point is stored at each internal or leaf node
Minimal -- I have eliminated as many fields as possible
from the final kd-node data structures.
The only remaining field is the stored search point
During the build process, we need some temporary extra fields for tracking.
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
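/*-------------------------------------------------------------------------
  Name:  GPU_LBT index helpers
  Desc:  Illustrative sketch (not in the original source) that restates the
         left-balanced tree layout described in the file header as index
         arithmetic: root at index 1, parent at i/2, children at 2*i and 2*i+1.
-------------------------------------------------------------------------*/
__host__ __device__ inline unsigned int LBT_Parent( unsigned int i ) { return (i >> 1); }        // parent      = i/2
__host__ __device__ inline unsigned int LBT_LeftChild( unsigned int i ) { return (i << 1); }     // left child  = 2*i
__host__ __device__ inline unsigned int LBT_RightChild( unsigned int i ) { return (i << 1) + 1; }// right child = 2*i+1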
/*-------------------------------------------------------------------------
Name: GPU_AxisValue
Desc: Helper Method
-------------------------------------------------------------------------*/
__device__ inline float GPU_NODE_2D_MED_AxisValue
(
const GPUNode_2D_MED * currNodes, // IN: IN node list
unsigned int index, // IN: Index of node to retrieve value for
unsigned int axis // IN: axis of value to retrieve
)
{
return currNodes[index].pos[axis];
}
__device__ inline float GPU_NODE_2D_LBT_AxisValue
(
const GPUNode_2D_LBT * currNodes, // IN: IN node list
unsigned int index, // IN: Index of node to retrieve value for
unsigned int axis // IN: axis of value to retrieve
)
{
return currNodes[index].pos[axis];
}
/*-------------------------------------------------------------------------
Name: GPU_Swap
Desc: Helper Method
-------------------------------------------------------------------------*/
__device__ inline void GPU_2D_NODE_MED_Swap
(
GPUNode_2D_MED * currNodes, // IN: Median node list
unsigned int idx1, // IN: Index of 1st node to swap
unsigned int idx2 // IN: Index of 2nd node to swap
)
{
GPUNode_2D_MED temp = currNodes[idx1]; // slow read
currNodes[idx1] = currNodes[idx2]; // slow read and write
currNodes[idx2] = temp; // slow write
}
__device__ inline void GPU_2D_NODE_LBT_Swap
(
GPUNode_2D_LBT * currNodes, // IN: left-balanced node list
unsigned int idx1, // IN: Index of 1st node to swap
unsigned int idx2 // IN: Index of 2nd node to swap
)
{
GPUNode_2D_LBT temp = currNodes[idx1]; // slow read
currNodes[idx1] = currNodes[idx2]; // slow read and write
currNodes[idx2] = temp; // slow write
}
/*-------------------------------------------------------------------------
Name: GPU_MedianOf3
Desc: Helper method,
Implements Median of three variant
for Median Partitioning algorithm
returns pivot value for partitioning algorithm
Note: finds middle element of left, mid, and right
where mid = (left+right)/2
enforces invariant that
array[left].val <= array[mid].val <= array[right].val
-------------------------------------------------------------------------*/
__device__ inline unsigned int GPU_2D_NODE_MED_MedianOf3
(
GPUNode_2D_MED * currNodes, // IN - node list
unsigned int leftIdx, // IN - left index
unsigned int rightIdx, // IN - right index
unsigned int axis // IN - axis to compare
)
{
// Compute Middle Index from left and right
unsigned int middleIdx = (leftIdx+rightIdx)/2;
unsigned int temp;
float leftVal = GPU_NODE_2D_MED_AxisValue( currNodes, leftIdx, axis );
float rightVal = GPU_NODE_2D_MED_AxisValue( currNodes, rightIdx, axis );
float middleVal = GPU_NODE_2D_MED_AxisValue( currNodes, middleIdx, axis );
// Sort left, center, mid values into correct order
if (leftVal > middleVal)
{
// Swap left and middle indices
temp = leftIdx;
leftIdx = middleIdx;
middleIdx = temp;
}
if (leftVal > rightVal)
{
// Swap left and right indices
temp = leftIdx;
leftIdx = rightIdx;
rightIdx = temp;
}
if (middleVal > rightVal)
{
// Swap middle and right indices
temp = middleIdx;
middleIdx = rightIdx;
rightIdx = temp;
}
// return middle as the pivot
return middleIdx;
}
/*---------------------------------------------------------
Name: GPU_PICK_PIVOT
Desc:	Picks a pivot index for the range [start,end]
		using a median-of-three of the left, middle, and right elements
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_MED_PICK_PIVOT
(
unsigned int * pivot, // OUT - pivot result
GPUNode_2D_MED * currNodes, // IN - node list
unsigned int start, // IN - range [start,end] to median select
unsigned int end,
unsigned int axis // IN - axis to compare
)
{
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
if (0 == bidx)
{
pivot[0] = GPU_2D_NODE_MED_MedianOf3( currNodes, start, end, axis );
}
}
/*---------------------------------------------------------
Name: GPU_COUNTS
Desc: Count # of nodes in range[start,end]
That are before, after, or equal to pivot value
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_MED_COUNTS
(
GPU_COUNTS_STARTS * counts, // OUT: counts
GPUNode_2D_MED * srcNodes, // IN: nodes are read & copied from this list (source)
GPUNode_2D_MED * dstNodes, // OUT: nodes are copied into this list (dest = scratch)
unsigned int * pivot, // IN: pivot location
unsigned int nNodes, // IN: number of nodes
unsigned int start, // IN: start of range to count
unsigned int end, // IN: end of range to count
unsigned int axis // IN: axis of dimension to work with
)
{
__shared__ GPUNode_2D_MED currNode[BUILD_THREADS_PER_BLOCK]; // Current thread starts
__shared__ GPU_COUNTS_STARTS currCount[BUILD_THREADS_PER_BLOCK]; // Current thread starts
// Local Variables (registers)
float pivotVal, currVal;
unsigned int countBefore = 0; // # of elements less than pivot value (x < n[p])
unsigned int countAfter = 0; // # of elements greater than pivot value (x > n[p])
unsigned int countEqual = 0;
unsigned int startRow, endRow, currRow, pivotIdx;
unsigned int startIdx, currIdx, leftOver;
// Read in pivot value
// Slow read from global memory (coalesced ???)
pivotIdx = pivot[0];
pivotVal = srcNodes[pivotIdx].pos[axis];
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread idx (global)
const int width = (gridDim.x * blockDim.x); // width of a grid row
//const int height = (gridDim.y * blockDim.y); // height of a grid column
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * width) + cCol;
// Compute Start & End Rows
startRow = (start-1) / (unsigned int)(width); // startRow = floor( start/width )
endRow = (end - 1) / (unsigned int)(width); // endRow = ceil( end/width )
leftOver = (end - 1) - (endRow*(unsigned int)(width));
endRow = (leftOver > 0) ? endRow+1 : endRow;
/*-----------------------
count elements
-----------------------*/
startIdx = startRow * (unsigned int)(width) + (unsigned int)cCol;
currIdx = startIdx;
for (currRow = startRow; currRow <= endRow; currRow++)
{
if ((currIdx < start) || (currIdx > end))
{
// Do nothing, the current element is outside of the range [start,end]
}
else
{
// Get current value
// Slow read from global memory (coalesced ???)
currNode[bidx] = srcNodes[currIdx];
// Count # of values before and after pivot
currVal = currNode[bidx].pos[axis];
if (currVal < pivotVal)
{
countBefore++;
}
else if (currVal > pivotVal)
{
countAfter++;
}
else
{
countEqual++;
}
// Write node to scratch buffer
// Slow write to external memory (coalesced ???)
dstNodes[currIdx] = currNode[bidx];
}
// Move to next row
currIdx += width;
}
// Store counts (shared memory)
currCount[bidx].before = countBefore;
currCount[bidx].after = countAfter;
currCount[bidx].equal = countEqual;
// Store counts (global memory)
// Slow write to global memory (coalesced ???)
counts[gidx] = currCount[bidx];
}
/*---------------------------------------------------------
Name: GPU_PARTITION_2D
Desc: Partitions original data set {O}=[start,end]
with 'n' elements into 3 datasets {L}, {M}, {R}
that are before, equal to, or after the pivot value.
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_MED_PARTITION
(
GPU_COUNTS_STARTS * starts, // OUT: starts
GPUNode_2D_MED * srcNodes, // IN/OUT: Nodes are read from this list (source = scratch)
GPUNode_2D_MED * dstNodes, // IN/OUT: Nodes are partitioned into this list (dest)
unsigned int nNodes, // IN: number of nodes
unsigned int start, // IN: start of range to partition
unsigned int end, // IN: end of range to partition
unsigned int axis, // IN: axis of dimension to work with
unsigned int * pivot // IN: pivot index
)
{
// Local Parameters (shared memory)
__shared__ GPUNode_2D_MED currNode[BUILD_THREADS_PER_BLOCK]; // Current thread starts
__shared__ GPU_COUNTS_STARTS currStart[BUILD_THREADS_PER_BLOCK]; // Current thread starts
// BUGBUG: need to write
// Local Variables (registers)
float pivotVal, currVal;
unsigned int startBefore, startAfter, startEqual, pivotIdx;
unsigned int startRow, endRow, currRow, leftOver, outIdx;
unsigned int startIdx, currIdx;
// Read in pivot value
// Slow read from global memory (coalesced ???)
pivotIdx = pivot[0];
pivotVal = srcNodes[pivotIdx].pos[axis];
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread idx (global)
const int width = (gridDim.x * blockDim.x); // width of a row
//const int height = (gridDim.y * blockDim.y); // # max threads in a column
//const int maxElems = (gW * width); // # max threads in grid of blocks
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * width) + cCol;
// Read in starts
// Slow read from global memory (coalesced ???)
currStart[bidx] = starts[gidx];
startBefore = currStart[bidx].before; // starting location of {L} set
startAfter = currStart[bidx].after; // starting location of {R} set
startEqual = currStart[bidx].equal; // starting location of {M} set
// Compute Start & End Rows
startRow = (start-1) / (unsigned int)(width); // startRow = floor( start/width )
endRow = (end - 1) / (unsigned int)(width); // endRow = ceil( end/width )
leftOver = (end - 1) - (endRow*(unsigned int)(width));
endRow = (leftOver > 0) ? endRow+1 : endRow;
/*-----------------------
Partition elements
-----------------------*/
startIdx = startRow * width + cCol;
currIdx = startIdx;
for (currRow = startRow; currRow <= endRow; currRow++)
{
if ((currIdx < start) || (currIdx > end))
{
// Do nothing, the current element is outside of range [start,end]
}
else
{
// Read node from original location
// Slow read from global memory (coalesced ???)
currNode[bidx] = srcNodes[currIdx];
// Partition node into appropriate location
currVal = currNode[bidx].pos[axis];
if (currVal < pivotVal)
{
outIdx = startBefore;
startBefore++;
}
else if (currVal > pivotVal)
{
outIdx = startAfter;
startAfter++;
}
else
{
outIdx = startEqual;
startEqual++;
}
//__syncthreads();
// Write node to new partitioned location
// Slow write to external memory
dstNodes[outIdx] = currNode[bidx];
//__syncthreads();
}
// Move to next row
currIdx += width;
}
__syncthreads();
}
/*---------------------------------------------------------
Name: GPU_STORE_2D
Desc: Store left balanced median node in LBT node list
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_STORE
(
GPUNode_2D_MED * medNodes, // IN: Median Nodes are read from this array
GPUNode_2D_LBT * lbtNodes, // OUT: LBT Nodes are stored in this array
unsigned int * pointIDS, // OUT: point indices are stored in this array
unsigned int medianIdx, // IN: left balanced median index
unsigned int targetIdx // IN: Target index
)
{
// Local Parameters (shared memory)
__shared__ GPUNode_2D_MED med;
__shared__ GPUNode_2D_LBT lbt;
// Store current median node
// in left balanced list at target
// Slow read from main memory
med = medNodes[medianIdx];
#ifdef _DEVICEEMU
//fprintf( stdout, "Store, Median=%u, Target=%u, x=%g, y=%g, PIDX=%u\n",
// medianIdx, targetIdx, med.pos[0], med.pos[1], med.m_searchIdx );
#endif
lbt.pos[0] = med.pos[0];
lbt.pos[1] = med.pos[1];
// Slow write to main memory
lbtNodes[targetIdx] = lbt;
pointIDS[targetIdx] = med.m_searchIdx;
}
/*---------------------------------------------------------
Name: GPU_COUNTS_TO_STARTS
Desc: Converts counts to starts
using modified prefix sum (scan) algorithm
Notes: Based on the prefix sum (scan) algorithm
found in the book...
GPU GEMS 3, Chapter 39, on pages 851-875
by Mark Harris, Shubhabrata Sengupta, and John Owens
---------------------------------------------------------*/
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \
(((n) >> NUM_BANKS) + ((n) >> (2*LOG_NUM_BANKS)))
#define WARP_SIZE 32
#define LOG_WARP_SIZE 5
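/*---------------------------------------------------------
  Host-side reference added as an illustrative sketch (not in the original
  source): a sequential counts-to-starts conversion equivalent to the
  "slow but correct" validation path in the _DEVICEEMU block of the kernel
  below.  Uses only the before/after/equal fields of GPU_COUNTS_STARTS.
---------------------------------------------------------*/
static void CPU_2D_COUNTS_TO_STARTS
(
	GPU_COUNTS_STARTS * starts,         // OUT - start offsets
	const GPU_COUNTS_STARTS * counts,   // IN  - per-thread counts
	unsigned int nCounts,               // IN  - # of items in count list
	unsigned int currStart              // IN  - first index of current range
)
{
	// Total the {L} and {M} counts to find where each region begins
	unsigned int totalBefore = 0, totalEqual = 0;
	for (unsigned int idx = 0; idx < nCounts; idx++)
	{
		totalBefore += counts[idx].before;
		totalEqual  += counts[idx].equal;
	}
	// {L} starts at currStart, {M} after all of {L}, {R} after {L} and {M}
	unsigned int startBefore = currStart;
	unsigned int startEqual  = currStart + totalBefore;
	unsigned int startAfter  = currStart + totalBefore + totalEqual;
	// Exclusive running sums give each thread its write offsets
	for (unsigned int idx = 0; idx < nCounts; idx++)
	{
		starts[idx].before = startBefore;  startBefore += counts[idx].before;
		starts[idx].equal  = startEqual;   startEqual  += counts[idx].equal;
		starts[idx].after  = startAfter;   startAfter  += counts[idx].after;
	}
}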
__global__ void
GPU_2D_COUNTS_TO_STARTS
(
GPU_COUNTS_STARTS * starts, // OUT - start list (store prefix sums here)
GPU_COUNTS_STARTS * counts, // IN - Count list (to total)
unsigned int nCounts, // IN - # of items in count list
unsigned int currStart, // IN - range[start,end]
unsigned int currEnd // ditto
)
{
// Local Memory (Shared Memory)
__shared__ GPU_COUNTS_STARTS sCounts[BUILD_CS_SCAN_MAX_ITEMS];
// Local Memory (Registers)
unsigned int ai, bi, aidx, bidx;
unsigned int d, before, after, equal;
unsigned int offset, bankOffsetA, bankOffsetB;
unsigned int tid, n, n2;
unsigned int baseBefore, baseAfter, baseEqual;
unsigned int totalBefore, totalEqual;
tid = threadIdx.x; // thread ID
n = blockDim.x; // # of threads
n2 = n << 1; // 2*n
offset = 1;
// load input into shared memory
//temp[2*tid] = g_idata[2*tid];
//temp[2*tid+1] = g_idata[2*tid+1];
// Rewritten to avoid bank conflicts
ai = tid;
bi = tid + n;
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Read 1st value into shared memory
if (ai < nCounts)
{
sCounts[ai + bankOffsetA] = counts[ai];
}
else
{
// Initialize counts to zero (additive identity)
sCounts[ai + bankOffsetA].before = 0;
sCounts[ai + bankOffsetA].after = 0;
sCounts[ai + bankOffsetA].equal = 0;
}
// Read 2nd value into shared memory
if (bi < nCounts)
{
sCounts[bi + bankOffsetB] = counts[bi];
}
else
{
// Initialize counts to zero (additive identity)
sCounts[bi + bankOffsetB].before = 0;
sCounts[bi + bankOffsetB].after = 0;
sCounts[bi + bankOffsetB].equal = 0;
}
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 0)
{
fprintf( stdout, "Before Reduction\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before;
after = sCounts[idx].after;
equal = sCounts[idx].equal;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
// Reduce to Total Sum (Up-sweep)
// by traversing the conceptual binary tree
// in a bottom-up in-place manner
//for (d = n2 >> 1; d > 0; d >>= 1)
for (d = n; d > 0; d >>= 1)
{
__syncthreads(); // Note: We need this here to make sure all threads
// have updated the current level of the
// conceptual binary tree before we move to
// the next level
if (tid < d)
{
unsigned int aidx, bidx;
aidx = offset*(2*tid+1)-1;
bidx = offset*(2*tid+2)-1;
aidx += CONFLICT_FREE_OFFSET( aidx );
bidx += CONFLICT_FREE_OFFSET( bidx );
sCounts[bidx].before += sCounts[aidx].before;
sCounts[bidx].after += sCounts[aidx].after;
sCounts[bidx].equal += sCounts[aidx].equal;
}
offset <<= 1; // offset = offset * 2;
}
//---------------------------------
// Compute totals and base offsets
//---------------------------------
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 0)
{
fprintf( stdout, "After Reduction\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before;
after = sCounts[idx].after;
equal = sCounts[idx].equal;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
__syncthreads(); // Note: We need this here to make sure we have the
// correct total counts available to all threads
// Have each thread grab the final total counts to create their bases
aidx = n2-1 + CONFLICT_FREE_OFFSET(n2-1);
totalBefore = sCounts[aidx].before;
//totalAfter = sCounts[aidx].after;
totalEqual = sCounts[aidx].equal;
baseBefore = currStart;
baseEqual = currStart + totalBefore;
baseAfter = currStart + totalBefore + totalEqual;
__syncthreads(); // Note: We need this here to avoid setting last element
// to all zeros before all threads have successfully
// grabbed their correct total counts
if (tid == 0)
{
// Clear the last element
sCounts[aidx].before = 0;
sCounts[aidx].after = 0;
sCounts[aidx].equal = 0;
}
// Build Prefix-sum (Down-sweep)
// by traversing the conceptual binary tree
// in a top-down in-place manner
for (d = 1; d < n2; d <<= 1)
{
offset >>= 1; // offset = offset / 2;
__syncthreads(); // Note:
if (tid < d)
{
aidx = offset*(2*tid+1)-1;
bidx = offset*(2*tid+2)-1;
aidx += CONFLICT_FREE_OFFSET( aidx );
bidx += CONFLICT_FREE_OFFSET( bidx );
// Add in prefix sum
before = sCounts[aidx].before;
after = sCounts[aidx].after;
equal = sCounts[aidx].equal;
sCounts[aidx].before = sCounts[bidx].before;
sCounts[aidx].after = sCounts[bidx].after;
sCounts[aidx].equal = sCounts[bidx].equal;
sCounts[bidx].before += before;
sCounts[bidx].after += after;
sCounts[bidx].equal += equal;
}
}
__syncthreads();
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 0)
{
fprintf( stdout, "After Scan\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before; // + baseBefore;
after = sCounts[idx].after; // + baseAfter;
equal = sCounts[idx].equal; // + baseEqual;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
// Store Results to output
//g_odata[2*tid] = temp[2*tid];
//g_odata[2*tid+1] = temp[2*tid+1];
// Add in currStart to each thread
sCounts[ai + bankOffsetA].before += baseBefore;
sCounts[ai + bankOffsetA].after += baseAfter;
sCounts[ai + bankOffsetA].equal += baseEqual;
if (ai < nCounts)
{
// Store result
starts[ai] = sCounts[ai + bankOffsetA];
}
// Add in currStart to each thread
sCounts[bi + bankOffsetB].before += baseBefore;
sCounts[bi + bankOffsetB].after += baseAfter;
sCounts[bi + bankOffsetB].equal += baseEqual;
if (bi < nCounts)
{
// Store result
starts[bi] = sCounts[bi + bankOffsetB];
}
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 127)
{
// Dump Results
fprintf( stdout, "After Scan\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before; // + baseBefore;
after = sCounts[idx].after; // + baseAfter;
equal = sCounts[idx].equal; // + baseEqual;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
// Now Do it the slow but correct way
unsigned int totalBefore, totalAfter, totalEqual;
totalBefore = 0;
totalAfter = 0;
totalEqual = 0;
// Compute Totals
for (idx = 0; idx < nCounts; idx++)
{
totalBefore += counts[idx].before;
totalAfter += counts[idx].after;
totalEqual += counts[idx].equal;
}
// Double check totals are correct
unsigned int totalCount = totalBefore + totalAfter + totalEqual;
unsigned int nRange = currEnd - currStart + 1;
if (totalCount != nRange)
{
// Error - we have a bug
fprintf( stdout, "Count Totals(%d) != Range Size(%d)\n", totalCount, nRange );
//exit( 0 );
}
// Initialize bases for first thread
baseBefore = currStart;
baseEqual = baseBefore + totalBefore;
baseAfter = baseEqual + totalEqual;
unsigned int startBefore = baseBefore;
unsigned int startEqual = baseEqual;
unsigned int startAfter = baseAfter;
// Compute starts from counts and bases
for (idx = 0; idx < nCounts; idx++)
{
// Set starts for current thread
starts[idx].before = startBefore;
starts[idx].after = startAfter;
starts[idx].equal = startEqual;
// Update running starts for next thread
startBefore += counts[idx].before;
startAfter += counts[idx].after;
startEqual += counts[idx].equal;
}
// Validate Fast vs. Slow Starts
unsigned int checkBefore, checkAfter, checkEqual;
fprintf( stdout, "Slow but correct starts\n" );
for (idx = 0; idx < nCounts; idx++)
{
// Get result from fast scan starts
checkBefore = sCounts[idx].before;
checkAfter = sCounts[idx].after;
checkEqual = sCounts[idx].equal;
// Get result from slow
before = starts[idx].before;
after = starts[idx].after;
equal = starts[idx].equal;
if ((checkBefore != before) ||
(checkAfter != after) ||
(checkEqual != equal))
{
fprintf( stdout, "Fast Starts[%u] = <B=%u, A=%u, E=%u>\n",
idx, checkBefore, checkAfter, checkEqual );
fprintf( stdout, "Slow Starts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
else
{
fprintf( stdout, "Match[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
}
|
ee3d89c19bf8cd0cba4de54816e109e0409f3e59.cu
|
/*-----------------------------------------------------------------------------
Name: GPU_PHASE1.cu
Desc: This file contains GPU kernels for building a kd-tree
The kd-nodes are stored in a left balanced layout
Notes:
Kd-tree attributes
static -- we need to know all points "a priori" before building the kd-tree
balanced -- Tree has maximum height of O( log<2> n )
Left-Balanced tree array layout
-- The kd-nodes in the kd-tree are stored in a left-balanced tree layout
-- Given n points, We allocate n+1 nodes
-- The kd-node at index zero is ignored (wasted space)
-- The Root kd-node is always found at index 1
-- Given any node at position 'i'
-- The parent node is found at 'i/2'
-- The left child node is found at '2*i'
-- The right child node is found at '2*i+1'
d-Dimensionality -- 2D, 3D, 4D, ...
cyclical -- we follow a cyclical pattern in switching between axes
at each level of the tree,
for 2D <x,y,x,y,x,y,...>
for 3D <x,y,z,x,y,z,...>
for 4D <x,y,z,w,x,y,z,w,...>
for 6D <x,y,z,w,s,t,x,y,z,w,s,t,...>
etc.
Point Storage -- 1 search point is stored at each internal or leaf node
Minimal -- I have eliminated as many fields as possible
from the final kd-node data structures.
The only remaining field is the stored search point
During the build process, we need some temporary extra fields for tracking.
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
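/*-------------------------------------------------------------------------
  Name:  GPU_LBT index helpers
  Desc:  Illustrative sketch (not in the original source) that restates the
         left-balanced tree layout described in the file header as index
         arithmetic: root at index 1, parent at i/2, children at 2*i and 2*i+1.
-------------------------------------------------------------------------*/
__host__ __device__ inline unsigned int LBT_Parent( unsigned int i ) { return (i >> 1); }        // parent      = i/2
__host__ __device__ inline unsigned int LBT_LeftChild( unsigned int i ) { return (i << 1); }     // left child  = 2*i
__host__ __device__ inline unsigned int LBT_RightChild( unsigned int i ) { return (i << 1) + 1; }// right child = 2*i+1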
/*-------------------------------------------------------------------------
Name: GPU_AxisValue
Desc: Helper Method
-------------------------------------------------------------------------*/
__device__ inline float GPU_NODE_2D_MED_AxisValue
(
const GPUNode_2D_MED * currNodes, // IN: IN node list
unsigned int index, // IN: Index of node to retrieve value for
unsigned int axis // IN: axis of value to retrieve
)
{
return currNodes[index].pos[axis];
}
__device__ inline float GPU_NODE_2D_LBT_AxisValue
(
const GPUNode_2D_LBT * currNodes, // IN: IN node list
unsigned int index, // IN: Index of node to retrieve value for
unsigned int axis // IN: axis of value to retrieve
)
{
return currNodes[index].pos[axis];
}
/*-------------------------------------------------------------------------
Name: GPU_Swap
Desc: Helper Method
-------------------------------------------------------------------------*/
__device__ inline void GPU_2D_NODE_MED_Swap
(
GPUNode_2D_MED * currNodes, // IN: Median node list
unsigned int idx1, // IN: Index of 1st node to swap
unsigned int idx2 // IN: Index of 2nd node to swap
)
{
GPUNode_2D_MED temp = currNodes[idx1]; // slow read
currNodes[idx1] = currNodes[idx2]; // slow read and write
currNodes[idx2] = temp; // slow write
}
__device__ inline void GPU_2D_NODE_LBT_Swap
(
GPUNode_2D_LBT * currNodes, // IN: left-balanced node list
unsigned int idx1, // IN: Index of 1st node to swap
unsigned int idx2 // IN: Index of 2nd node to swap
)
{
GPUNode_2D_LBT temp = currNodes[idx1]; // slow read
currNodes[idx1] = currNodes[idx2]; // slow read and write
currNodes[idx2] = temp; // slow write
}
/*-------------------------------------------------------------------------
Name: GPU_MedianOf3
Desc: Helper method,
Implements Median of three variant
for Median Partitioning algorithm
returns pivot value for partitioning algorithm
Note: finds middle element of left, mid, and right
where mid = (left+right)/2
enforces invariant that
array[left].val <= array[mid].val <= array[right].val
-------------------------------------------------------------------------*/
__device__ inline unsigned int GPU_2D_NODE_MED_MedianOf3
(
GPUNode_2D_MED * currNodes, // IN - node list
unsigned int leftIdx, // IN - left index
unsigned int rightIdx, // IN - right index
unsigned int axis // IN - axis to compare
)
{
// Compute Middle Index from left and right
unsigned int middleIdx = (leftIdx+rightIdx)/2;
unsigned int temp;
float leftVal = GPU_NODE_2D_MED_AxisValue( currNodes, leftIdx, axis );
float rightVal = GPU_NODE_2D_MED_AxisValue( currNodes, rightIdx, axis );
float middleVal = GPU_NODE_2D_MED_AxisValue( currNodes, middleIdx, axis );
// Sort left, center, mid values into correct order
if (leftVal > middleVal)
{
// Swap left and middle indices
temp = leftIdx;
leftIdx = middleIdx;
middleIdx = temp;
}
if (leftVal > rightVal)
{
// Swap left and right indices
temp = leftIdx;
leftIdx = rightIdx;
rightIdx = temp;
}
if (middleVal > rightVal)
{
// Swap middle and right indices
temp = middleIdx;
middleIdx = rightIdx;
rightIdx = temp;
}
// return middle as the pivot
return middleIdx;
}
/*---------------------------------------------------------
Name: GPU_PICK_PIVOT
Desc:	Picks a pivot index for the range [start,end]
		using a median-of-three of the left, middle, and right elements
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_MED_PICK_PIVOT
(
unsigned int * pivot, // OUT - pivot result
GPUNode_2D_MED * currNodes, // IN - node list
unsigned int start, // IN - range [start,end] to median select
unsigned int end,
unsigned int axis // IN - axis to compare
)
{
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
if (0 == bidx)
{
pivot[0] = GPU_2D_NODE_MED_MedianOf3( currNodes, start, end, axis );
}
}
/*---------------------------------------------------------
Name: GPU_COUNTS
Desc: Count # of nodes in range[start,end]
That are before, after, or equal to pivot value
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_MED_COUNTS
(
GPU_COUNTS_STARTS * counts, // OUT: counts
GPUNode_2D_MED * srcNodes, // IN: nodes are read & copied from this list (source)
GPUNode_2D_MED * dstNodes, // OUT: nodes are copied into this list (dest = scratch)
unsigned int * pivot, // IN: pivot location
unsigned int nNodes, // IN: number of nodes
unsigned int start, // IN: start of range to count
unsigned int end, // IN: end of range to count
unsigned int axis // IN: axis of dimension to work with
)
{
__shared__ GPUNode_2D_MED currNode[BUILD_THREADS_PER_BLOCK]; // Current thread starts
__shared__ GPU_COUNTS_STARTS currCount[BUILD_THREADS_PER_BLOCK]; // Current thread starts
// Local Variables (registers)
float pivotVal, currVal;
unsigned int countBefore = 0; // # of elements less than pivot value (x < n[p])
unsigned int countAfter = 0; // # of elements greater than pivot value (x > n[p])
unsigned int countEqual = 0;
unsigned int startRow, endRow, currRow, pivotIdx;
unsigned int startIdx, currIdx, leftOver;
// Read in pivot value
// Slow read from global memory (coalesced ???)
pivotIdx = pivot[0];
pivotVal = srcNodes[pivotIdx].pos[axis];
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread idx (global)
const int width = (gridDim.x * blockDim.x); // width of a grid row
//const int height = (gridDim.y * blockDim.y); // height of a grid column
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * width) + cCol;
// Compute Start & End Rows
startRow = (start-1) / (unsigned int)(width); // startRow = floor( start/width )
endRow = (end - 1) / (unsigned int)(width); // endRow = ceil( end/width )
leftOver = (end - 1) - (endRow*(unsigned int)(width));
endRow = (leftOver > 0) ? endRow+1 : endRow;
/*-----------------------
count elements
-----------------------*/
startIdx = startRow * (unsigned int)(width) + (unsigned int)cCol;
currIdx = startIdx;
for (currRow = startRow; currRow <= endRow; currRow++)
{
if ((currIdx < start) || (currIdx > end))
{
// Do nothing, the current element is outside of the range [start,end]
}
else
{
// Get current value
// Slow read from global memory (coalesced ???)
currNode[bidx] = srcNodes[currIdx];
// Count # of values before and after pivot
currVal = currNode[bidx].pos[axis];
if (currVal < pivotVal)
{
countBefore++;
}
else if (currVal > pivotVal)
{
countAfter++;
}
else
{
countEqual++;
}
// Write node to scratch buffer
// Slow write to external memory (coalesced ???)
dstNodes[currIdx] = currNode[bidx];
}
// Move to next row
currIdx += width;
}
// Store counts (shared memory)
currCount[bidx].before = countBefore;
currCount[bidx].after = countAfter;
currCount[bidx].equal = countEqual;
// Store counts (global memory)
// Slow write to global memory (coalesced ???)
counts[gidx] = currCount[bidx];
}
/*---------------------------------------------------------
Name: GPU_PARTITION_2D
Desc: Partitions original data set {O}=[start,end]
with 'n' elements into 3 datasets {L}, {M}, {R}
that are before, equal to, or after the pivot value.
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_MED_PARTITION
(
GPU_COUNTS_STARTS * starts, // OUT: starts
GPUNode_2D_MED * srcNodes, // IN/OUT: Nodes are read from this list (source = scratch)
GPUNode_2D_MED * dstNodes, // IN/OUT: Nodes are partitioned into this list (dest)
unsigned int nNodes, // IN: number of nodes
unsigned int start, // IN: start of range to partition
unsigned int end, // IN: end of range to partition
unsigned int axis, // IN: axis of dimension to work with
unsigned int * pivot // IN: pivot index
)
{
// Local Parameters (shared memory)
__shared__ GPUNode_2D_MED currNode[BUILD_THREADS_PER_BLOCK]; // Current thread starts
__shared__ GPU_COUNTS_STARTS currStart[BUILD_THREADS_PER_BLOCK]; // Current thread starts
// BUGBUG: need to write
// Local Variables (registers)
float pivotVal, currVal;
unsigned int startBefore, startAfter, startEqual, pivotIdx;
unsigned int startRow, endRow, currRow, leftOver, outIdx;
unsigned int startIdx, currIdx;
// Read in pivot value
// Slow read from global memory (coalesced ???)
pivotIdx = pivot[0];
pivotVal = srcNodes[pivotIdx].pos[axis];
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread idx (global)
const int width = (gridDim.x * blockDim.x); // width of a row
//const int height = (gridDim.y * blockDim.y); // # max threads in a column
//const int maxElems = (gW * width); // # max threads in grid of blocks
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * width) + cCol;
// Read in starts
// Slow read from global memory (coalesced ???)
currStart[bidx] = starts[gidx];
startBefore = currStart[bidx].before; // starting location of {L} set
startAfter = currStart[bidx].after; // starting location of {R} set
startEqual = currStart[bidx].equal; // starting location of {M} set
// Compute Start & End Rows
startRow = (start-1) / (unsigned int)(width); // startRow = floor( start/width )
endRow = (end - 1) / (unsigned int)(width); // endRow = ceil( end/width )
leftOver = (end - 1) - (endRow*(unsigned int)(width));
endRow = (leftOver > 0) ? endRow+1 : endRow;
/*-----------------------
Partition elements
-----------------------*/
startIdx = startRow * width + cCol;
currIdx = startIdx;
for (currRow = startRow; currRow <= endRow; currRow++)
{
if ((currIdx < start) || (currIdx > end))
{
// Do nothing, the current element is outside of range [start,end]
}
else
{
// Read node from original location
// Slow read from global memory (coalesced ???)
currNode[bidx] = srcNodes[currIdx];
// Partition node into appropriate location
currVal = currNode[bidx].pos[axis];
if (currVal < pivotVal)
{
outIdx = startBefore;
startBefore++;
}
else if (currVal > pivotVal)
{
outIdx = startAfter;
startAfter++;
}
else
{
outIdx = startEqual;
startEqual++;
}
//__syncthreads();
// Write node to new partitioned location
// Slow write to external memory
dstNodes[outIdx] = currNode[bidx];
//__syncthreads();
}
// Move to next row
currIdx += width;
}
__syncthreads();
}
/*---------------------------------------------------------
Name: GPU_STORE_2D
Desc: Store left balanced median node in LBT node list
---------------------------------------------------------*/
__global__ void
GPU_2D_NODE_STORE
(
GPUNode_2D_MED * medNodes, // IN: Median Nodes are read from this array
GPUNode_2D_LBT * lbtNodes, // OUT: LBT Nodes are stored in this array
unsigned int * pointIDS, // OUT: point indices are stored in this array
unsigned int medianIdx, // IN: left balanced median index
unsigned int targetIdx // IN: Target index
)
{
// Local Parameters (shared memory)
__shared__ GPUNode_2D_MED med;
__shared__ GPUNode_2D_LBT lbt;
// Store current median node
// in left balanced list at target
// Slow read from main memory
med = medNodes[medianIdx];
#ifdef _DEVICEEMU
//fprintf( stdout, "Store, Median=%u, Target=%u, x=%g, y=%g, PIDX=%u\n",
// medianIdx, targetIdx, med.pos[0], med.pos[1], med.m_searchIdx );
#endif
lbt.pos[0] = med.pos[0];
lbt.pos[1] = med.pos[1];
// Slow write to main memory
lbtNodes[targetIdx] = lbt;
pointIDS[targetIdx] = med.m_searchIdx;
}
/*---------------------------------------------------------
Name: GPU_COUNTS_TO_STARTS
Desc: Converts counts to starts
using modified prefix sum (scan) algorithm
Notes: Based on the prefix sum (scan) algorithm
found in the book...
GPU GEMS 3, Chapter 39, on pages 851-875
by Mark Harris, Shubhabrata Sengupta, and John Owens
---------------------------------------------------------*/
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \
(((n) >> NUM_BANKS) + ((n) >> (2*LOG_NUM_BANKS)))
#define WARP_SIZE 32
#define LOG_WARP_SIZE 5
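/*---------------------------------------------------------
  Host-side reference added as an illustrative sketch (not in the original
  source): a sequential counts-to-starts conversion equivalent to the
  "slow but correct" validation path in the _DEVICEEMU block of the kernel
  below.  Uses only the before/after/equal fields of GPU_COUNTS_STARTS.
---------------------------------------------------------*/
static void CPU_2D_COUNTS_TO_STARTS
(
	GPU_COUNTS_STARTS * starts,         // OUT - start offsets
	const GPU_COUNTS_STARTS * counts,   // IN  - per-thread counts
	unsigned int nCounts,               // IN  - # of items in count list
	unsigned int currStart              // IN  - first index of current range
)
{
	// Total the {L} and {M} counts to find where each region begins
	unsigned int totalBefore = 0, totalEqual = 0;
	for (unsigned int idx = 0; idx < nCounts; idx++)
	{
		totalBefore += counts[idx].before;
		totalEqual  += counts[idx].equal;
	}
	// {L} starts at currStart, {M} after all of {L}, {R} after {L} and {M}
	unsigned int startBefore = currStart;
	unsigned int startEqual  = currStart + totalBefore;
	unsigned int startAfter  = currStart + totalBefore + totalEqual;
	// Exclusive running sums give each thread its write offsets
	for (unsigned int idx = 0; idx < nCounts; idx++)
	{
		starts[idx].before = startBefore;  startBefore += counts[idx].before;
		starts[idx].equal  = startEqual;   startEqual  += counts[idx].equal;
		starts[idx].after  = startAfter;   startAfter  += counts[idx].after;
	}
}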
__global__ void
GPU_2D_COUNTS_TO_STARTS
(
GPU_COUNTS_STARTS * starts, // OUT - start list (store prefix sums here)
GPU_COUNTS_STARTS * counts, // IN - Count list (to total)
unsigned int nCounts, // IN - # of items in count list
unsigned int currStart, // IN - range[start,end]
unsigned int currEnd // ditto
)
{
// Local Memory (Shared Memory)
__shared__ GPU_COUNTS_STARTS sCounts[BUILD_CS_SCAN_MAX_ITEMS];
// Local Memory (Registers)
unsigned int ai, bi, aidx, bidx;
unsigned int d, before, after, equal;
unsigned int offset, bankOffsetA, bankOffsetB;
unsigned int tid, n, n2;
unsigned int baseBefore, baseAfter, baseEqual;
unsigned int totalBefore, totalEqual;
tid = threadIdx.x; // thread ID
n = blockDim.x; // # of threads
n2 = n << 1; // 2*n
offset = 1;
// load input into shared memory
//temp[2*tid] = g_idata[2*tid];
//temp[2*tid+1] = g_idata[2*tid+1];
// Rewritten to avoid bank conflicts
ai = tid;
bi = tid + n;
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Read 1st value into shared memory
if (ai < nCounts)
{
sCounts[ai + bankOffsetA] = counts[ai];
}
else
{
// Initialize counts to zero (additive identity)
sCounts[ai + bankOffsetA].before = 0;
sCounts[ai + bankOffsetA].after = 0;
sCounts[ai + bankOffsetA].equal = 0;
}
// Read 2nd value into shared memory
if (bi < nCounts)
{
sCounts[bi + bankOffsetB] = counts[bi];
}
else
{
// Initialize counts to zero (additive identity)
sCounts[bi + bankOffsetB].before = 0;
sCounts[bi + bankOffsetB].after = 0;
sCounts[bi + bankOffsetB].equal = 0;
}
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 0)
{
fprintf( stdout, "Before Reduction\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before;
after = sCounts[idx].after;
equal = sCounts[idx].equal;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
// Reduce to Total Sum (Up-sweep)
// by traversing the conceptual binary tree
// in a bottom-up in-place manner
//for (d = n2 >> 1; d > 0; d >>= 1)
for (d = n; d > 0; d >>= 1)
{
__syncthreads(); // Note: We need this here to make sure all threads
// have updated the current level of the
// conceptual binary tree before we move to
// the next level
if (tid < d)
{
unsigned int aidx, bidx;
aidx = offset*(2*tid+1)-1;
bidx = offset*(2*tid+2)-1;
aidx += CONFLICT_FREE_OFFSET( aidx );
bidx += CONFLICT_FREE_OFFSET( bidx );
sCounts[bidx].before += sCounts[aidx].before;
sCounts[bidx].after += sCounts[aidx].after;
sCounts[bidx].equal += sCounts[aidx].equal;
}
offset <<= 1; // offset = offset * 2;
}
//---------------------------------
// Compute totals and base offsets
//---------------------------------
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 0)
{
fprintf( stdout, "After Reduction\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before;
after = sCounts[idx].after;
equal = sCounts[idx].equal;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
__syncthreads(); // Note: We need this here to make sure we have the
// correct total counts available to all threads
// Have each thread grab the final total counts to create their bases
aidx = n2-1 + CONFLICT_FREE_OFFSET(n2-1);
totalBefore = sCounts[aidx].before;
//totalAfter = sCounts[aidx].after;
totalEqual = sCounts[aidx].equal;
baseBefore = currStart;
baseEqual = currStart + totalBefore;
baseAfter = currStart + totalBefore + totalEqual;
__syncthreads(); // Note: We need this here to avoid setting last element
// to all zeros before all threads have successfully
// grabbed their correct total counts
if (tid == 0)
{
// Clear the last element
sCounts[aidx].before = 0;
sCounts[aidx].after = 0;
sCounts[aidx].equal = 0;
}
// Build Prefix-sum (Down-sweep)
// by traversing the conceptual binary tree
// in a top-down in-place manner
for (d = 1; d < n2; d <<= 1)
{
offset >>= 1; // offset = offset / 2;
__syncthreads(); // Note: make sure the previous level's writes are visible before this level reads them
if (tid < d)
{
aidx = offset*(2*tid+1)-1;
bidx = offset*(2*tid+2)-1;
aidx += CONFLICT_FREE_OFFSET( aidx );
bidx += CONFLICT_FREE_OFFSET( bidx );
// Add in prefix sum
before = sCounts[aidx].before;
after = sCounts[aidx].after;
equal = sCounts[aidx].equal;
sCounts[aidx].before = sCounts[bidx].before;
sCounts[aidx].after = sCounts[bidx].after;
sCounts[aidx].equal = sCounts[bidx].equal;
sCounts[bidx].before += before;
sCounts[bidx].after += after;
sCounts[bidx].equal += equal;
}
}
__syncthreads();
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 0)
{
fprintf( stdout, "After Scan\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before; // + baseBefore;
after = sCounts[idx].after; // + baseAfter;
equal = sCounts[idx].equal; // + baseEqual;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
// Store Results to output
//g_odata[2*tid] = temp[2*tid];
//g_odata[2*tid+1] = temp[2*tid+1];
// Add in currStart to each thread
sCounts[ai + bankOffsetA].before += baseBefore;
sCounts[ai + bankOffsetA].after += baseAfter;
sCounts[ai + bankOffsetA].equal += baseEqual;
if (ai < nCounts)
{
// Store result
starts[ai] = sCounts[ai + bankOffsetA];
}
// Add in currStart to each thread
sCounts[bi + bankOffsetB].before += baseBefore;
sCounts[bi + bankOffsetB].after += baseAfter;
sCounts[bi + bankOffsetB].equal += baseEqual;
if (bi < nCounts)
{
// Store result
starts[bi] = sCounts[bi + bankOffsetB];
}
#ifdef _DEVICEEMU
/*
__syncthreads();
if (tid == 127)
{
// Dump Results
fprintf( stdout, "After Scan\n" );
unsigned int idx;
for (idx = 0; idx < n2; idx++)
{
before = sCounts[idx].before; // + baseBefore;
after = sCounts[idx].after; // + baseAfter;
equal = sCounts[idx].equal; // + baseEqual;
fprintf( stdout, "Counts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
fprintf( stdout, "\n" );
// Now Do it the slow but correct way
unsigned int totalBefore, totalAfter, totalEqual;
totalBefore = 0;
totalAfter = 0;
totalEqual = 0;
// Compute Totals
for (idx = 0; idx < nCounts; idx++)
{
totalBefore += counts[idx].before;
totalAfter += counts[idx].after;
totalEqual += counts[idx].equal;
}
// Double check totals are correct
unsigned int totalCount = totalBefore + totalAfter + totalEqual;
unsigned int nRange = currEnd - currStart + 1;
if (totalCount != nRange)
{
// Error - we have a bug
fprintf( stdout, "Count Totals(%d) != Range Size(%d)\n", totalCount, nRange );
//exit( 0 );
}
// Initialize bases for first thread
baseBefore = currStart;
baseEqual = baseBefore + totalBefore;
baseAfter = baseEqual + totalEqual;
unsigned int startBefore = baseBefore;
unsigned int startEqual = baseEqual;
unsigned int startAfter = baseAfter;
// Compute starts from counts and bases
for (idx = 0; idx < nCounts; idx++)
{
// Set starts for current thread
starts[idx].before = startBefore;
starts[idx].after = startAfter;
starts[idx].equal = startEqual;
// Update running starts for next thread
startBefore += counts[idx].before;
startAfter += counts[idx].after;
startEqual += counts[idx].equal;
}
// Validate Fast vs. Slow Starts
unsigned int checkBefore, checkAfter, checkEqual;
fprintf( stdout, "Slow but correct starts\n" );
for (idx = 0; idx < nCounts; idx++)
{
// Get result from fast scan starts
checkBefore = sCounts[idx].before;
checkAfter = sCounts[idx].after;
checkEqual = sCounts[idx].equal;
// Get result from slow
before = starts[idx].before;
after = starts[idx].after;
equal = starts[idx].equal;
if ((checkBefore != before) ||
(checkAfter != after) ||
(checkEqual != equal))
{
fprintf( stdout, "Fast Starts[%u] = <B=%u, A=%u, E=%u>\n",
idx, checkBefore, checkAfter, checkEqual );
fprintf( stdout, "Slow Starts[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
else
{
fprintf( stdout, "Match[%u] = <B=%u, A=%u, E=%u>\n",
idx, before, after, equal );
}
}
fprintf( stdout, "\n" );
}
__syncthreads();
*/
#endif
}
|
cb2ce1993a9b1e670a4ecdd0f68bd3f0f700b032.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#include <stdlib.h>
#define MIN(a,b) ({typeof(a)_a=(a);typeof(a)_b=(b);_a<_b?_a:_b;})
#define MAX(a,b) ({typeof(a)_a=(a);typeof(a)_b=(b);_a>_b?_a:_b;})
__device__ const int mxar[4]={-1,1,0,0};
__device__ const int myar[4]={0,0,-1,1};
//TODO: fix mysterious size in range [17-32] bug with really large values
// hiding in devPtr after init_value, until then don't use h/w in that range
// has nothing to do with that range, kinda random though, also never observed on 8? longer sims seem more likely to be affected?
//float beta_t=0.3;
//https://stackoverflow.com/questions/18501081/generating-random-number-within-cuda-kernel-in-a-varying-range
//source of hiprand
__global__ void init_value(int *grd,int val,int pitch,int w,int h){
int gx=16*blockIdx.x+threadIdx.x;int gy=16*blockIdx.y+threadIdx.y;
if (gx>=0 and gx<w and gy>=0 and gy<h){
int loc=gx*pitch+gy;
grd[loc]=val;
}
//__syncthreads();
//if(gx>=0 and gx<w and gy>=0 and gy<h){
// if(MAX(grd[gy*pitch+gx],-grd[gy*pitch+gx])>2){
// grd[gy*pitch+gx]+=1;
// }
//}
__syncthreads();
}
__global__ void setup_kernel ( hiprandState_t * state, unsigned long seed,int n )
{
int id = threadIdx.x+n*threadIdx.y;
hiprand_init ( seed+id, id, 0, &state[id] );
__syncthreads();
}
__global__ void monte_run(int *grd,int pitch,int *acc,int offx,int offy,float beta,hiprandState_t* globalState,int dx, int dy,int n,int w,int h){
int x=threadIdx.x; int y=threadIdx.y;
int gx=x*dx+offx;int gy=y*dy+offy;
int id = threadIdx.x+n*threadIdx.y;
hiprandState_t localState=globalState[id];
int lsum=0;
for(int i=0;i<4;i++){
int tx=gx+mxar[i];
int ty=gy+myar[i];
if(tx>=0 and tx<w and ty>=0 and ty<h){
lsum+=grd[ty*pitch+tx];
//if(MAX(grd[ty*pitch+tx],-grd[ty*pitch+tx])>2){
// grd[ty*pitch+tx]+=1;
//}
}
}
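// Metropolis acceptance: flipping this spin against neighbour sum lsum costs energy when
// lsum*spin > 0, so accept with probability exp(-beta*lsum*spin); otherwise always flip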
float p;
if(lsum*grd[gy*pitch+gx]>0){
p=-lsum*grd[gy*pitch+gx]*beta;
p=expf(p);
}else{
p=1.0;
}
float rnd=hiprand_uniform(&localState);
globalState[id]=localState;
if (rnd<p){
acc[id]=lsum*grd[gy*pitch+gx];
grd[gy*pitch+gx]=-grd[gy*pitch+gx];
}else{
acc[id]=0;
}
__syncthreads();
}
// this only works for one block, see below for more blocks implementation
//https://gist.github.com/wh5a/4424992
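// (a single block is required because __syncthreads() only synchronizes threads within one block)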
__global__ void reduce_dif(int *nums, int *acc,int ind, int n){
int x=threadIdx.x;
int l=n*n;
for(int stride=(n*n+1)>>1;stride>=1;stride=(stride+1)>>1){
__syncthreads();
if(x+stride<l){
nums[x]+=nums[x+stride];
}
l=(l+1)>>1;
if(stride==1){
stride=0;
}
}
if (x==0){
acc[ind]=nums[0];
}
__syncthreads();
}
int* run_sim(float beta,int w,int h,int steps){
int *hist;
hist= (int *) malloc(steps*sizeof(int));
int* devPtr;
//size_t pitch;
//hipMallocPitch(&devPtr,&pitch,w*sizeof(int),h);
hipMalloc(&devPtr,w*h*sizeof(int));
hipMemset(devPtr,0,w*h*sizeof(int));
dim3 set_grid_size((w-1)/16+1,(h-1)/16+1);
dim3 set_block_size(16,16);
hipLaunchKernelGGL(( init_value), dim3(set_grid_size),dim3(set_block_size), 0, 0, devPtr,1,w,w,h);
hipDeviceSynchronize();
const int accsize=sizeof(int)*steps;
int* accPtr;
hipMalloc(&accPtr,accsize);
hipMemset(accPtr,0,steps*sizeof(int));
int n=MIN(16,MIN(w/2,h/2));
int* monte_acc_a;
int* monte_acc_b;
hipMalloc(&monte_acc_a,n*n*sizeof(int));
hipMalloc(&monte_acc_b,n*n*sizeof(int));
dim3 gridsize(1,1);
dim3 blocksize(n,n);
hiprandState_t* devStates;
hipMalloc( &devStates, n*n*sizeof( hiprandState_t ) );
hipLaunchKernelGGL(( setup_kernel) , dim3(gridsize), dim3(blocksize) , 0, 0, devStates, time(NULL) ,n);
int dx=w/n;
int dy=h/n;
int mdx=dx+w%n;
int mdy=dy+h%n;
srand(time(NULL));
int tdx= rand()%mdx;
int tdy= rand()%mdy;
hipDeviceSynchronize();
hipLaunchKernelGGL(( monte_run) , dim3(gridsize),dim3(blocksize) , 0, 0, devPtr,w,monte_acc_a,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
hipDeviceSynchronize();
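// alternate between monte_acc_a and monte_acc_b so each sweep's per-site results are
// reduced into accPtr[step] before that buffer is reused by a later sweep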
for(int i=0;i<(steps/2)-1;i++){
hipLaunchKernelGGL(( reduce_dif) , dim3(1),dim3(n*n) , 0, 0, monte_acc_a,accPtr,2*i,n);
hipLaunchKernelGGL(( monte_run) , dim3(gridsize),dim3(blocksize) , 0, 0, devPtr,w,monte_acc_b,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce_dif) , dim3(1),dim3(n*n) , 0, 0, monte_acc_b,accPtr,2*i+1,n);
hipLaunchKernelGGL(( monte_run) , dim3(gridsize),dim3(blocksize) , 0, 0, devPtr,w,monte_acc_a,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( reduce_dif) , dim3(1),dim3(n*n) , 0, 0, monte_acc_a,accPtr,steps-2,n);
hipLaunchKernelGGL(( monte_run) , dim3(gridsize),dim3(blocksize) , 0, 0, devPtr,w,monte_acc_b,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce_dif) , dim3(1),dim3(n*n) , 0, 0, monte_acc_b,accPtr,steps-1,n);
hipMemcpy(hist,accPtr,accsize,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(devStates);
hipFree(monte_acc_a);
hipFree(monte_acc_b);
hipFree(accPtr);
hipFree(devPtr);
return hist;
}
int main(int argc, char **argv){
float t_min;
float t_max;
float t_steps;
int steps;
int * size_l;
int l;
size_l=(int*)malloc((MAX(1,argc-5))*sizeof(int));
if (argc>=6){
l=argc-5;
for(int i=5;i<argc;i++){
size_l[i-5]=atoi(argv[i]);
}
steps=atoi(argv[1]);
t_min=atof(argv[2]);
t_max=atof(argv[3]);
t_steps=atoi(argv[4]);
}else{
l=1;
steps=(100);
t_min=3;
t_max=5;
t_steps=5;
size_l[0]=4;
}
for(int i=0;i<l;i++){
int h=size_l[i];int w=size_l[i];
for(int t_step=0;t_step<t_steps;t_step++){
float t=t_min+t_step*(t_max-t_min)/(t_steps-1);
float beta=1/t;
int *res=run_sim(beta,w,h,steps);
FILE *fp;
char *fname=(char*)malloc(100*sizeof(char));
sprintf(fname,"data/ising_l_%d_t_%f.txt",w,t);
fp=fopen(fname,"w");
if(fp == NULL){
exit(-1);
}
for (int i=0;i<steps;i++){
fprintf(fp,"%d\n",res[i]);
}
fclose(fp);
free(fname);
free(res);
}
}
free(size_l);
return EXIT_SUCCESS;
}
|
cb2ce1993a9b1e670a4ecdd0f68bd3f0f700b032.cu
|
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <stdlib.h>
#define MIN(a,b) ({typeof(a)_a=(a);typeof(a)_b=(b);_a<_b?_a:_b;})
#define MAX(a,b) ({typeof(a)_a=(a);typeof(a)_b=(b);_a>_b?_a:_b;})
__device__ const int mxar[4]={-1,1,0,0};
__device__ const int myar[4]={0,0,-1,1};
//TODO: fix mysterious size in range [17-32] bug with really large values
// hiding in devPtr after init_value, until then don't use h/w in that range
// has nothing to do with that range, kinda random though, also never observed on 8? longer sims seem more likely to be affected?
//float beta_t=0.3;
//https://stackoverflow.com/questions/18501081/generating-random-number-within-cuda-kernel-in-a-varying-range
//source of curand
__global__ void init_value(int *grd,int val,int pitch,int w,int h){
int gx=16*blockIdx.x+threadIdx.x;int gy=16*blockIdx.y+threadIdx.y;
if (gx>=0 and gx<w and gy>=0 and gy<h){
int loc=gx*pitch+gy;
grd[loc]=val;
}
//__syncthreads();
//if(gx>=0 and gx<w and gy>=0 and gy<h){
// if(MAX(grd[gy*pitch+gx],-grd[gy*pitch+gx])>2){
// grd[gy*pitch+gx]+=1;
// }
//}
__syncthreads();
}
__global__ void setup_kernel ( curandState * state, unsigned long seed,int n )
{
int id = threadIdx.x+n*threadIdx.y;
curand_init ( seed+id, id, 0, &state[id] );
__syncthreads();
}
__global__ void monte_run(int *grd,int pitch,int *acc,int offx,int offy,float beta,curandState* globalState,int dx, int dy,int n,int w,int h){
int x=threadIdx.x; int y=threadIdx.y;
int gx=x*dx+offx;int gy=y*dy+offy;
int id = threadIdx.x+n*threadIdx.y;
curandState localState=globalState[id];
int lsum=0;
for(int i=0;i<4;i++){
int tx=gx+mxar[i];
int ty=gy+myar[i];
if(tx>=0 and tx<w and ty>=0 and ty<h){
lsum+=grd[ty*pitch+tx];
//if(MAX(grd[ty*pitch+tx],-grd[ty*pitch+tx])>2){
// grd[ty*pitch+tx]+=1;
//}
}
}
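// Metropolis acceptance: flipping this spin against neighbour sum lsum costs energy when
// lsum*spin > 0, so accept with probability exp(-beta*lsum*spin); otherwise always flip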
float p;
if(lsum*grd[gy*pitch+gx]>0){
p=-lsum*grd[gy*pitch+gx]*beta;
p=expf(p);
}else{
p=1.0;
}
float rnd=curand_uniform(&localState);
globalState[id]=localState;
if (rnd<p){
acc[id]=lsum*grd[gy*pitch+gx];
grd[gy*pitch+gx]=-grd[gy*pitch+gx];
}else{
acc[id]=0;
}
__syncthreads();
}
// this only works for one block, see below for more blocks implementation
//https://gist.github.com/wh5a/4424992
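// (a single block is required because __syncthreads() only synchronizes threads within one block)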
__global__ void reduce_dif(int *nums, int *acc,int ind, int n){
int x=threadIdx.x;
int l=n*n;
for(int stride=(n*n+1)>>1;stride>=1;stride=(stride+1)>>1){
__syncthreads();
if(x+stride<l){
nums[x]+=nums[x+stride];
}
l=(l+1)>>1;
if(stride==1){
stride=0;
}
}
if (x==0){
acc[ind]=nums[0];
}
__syncthreads();
}
int* run_sim(float beta,int w,int h,int steps){
int *hist;
hist= (int *) malloc(steps*sizeof(int));
int* devPtr;
//size_t pitch;
//cudaMallocPitch(&devPtr,&pitch,w*sizeof(int),h);
cudaMalloc(&devPtr,w*h*sizeof(int));
cudaMemset(devPtr,0,w*h*sizeof(int));
dim3 set_grid_size((w-1)/16+1,(h-1)/16+1);
dim3 set_block_size(16,16);
init_value<<<set_grid_size,set_block_size>>>(devPtr,1,w,w,h);
cudaDeviceSynchronize();
const int accsize=sizeof(int)*steps;
int* accPtr;
cudaMalloc(&accPtr,accsize);
cudaMemset(accPtr,0,steps*sizeof(int));
int n=MIN(16,MIN(w/2,h/2));
int* monte_acc_a;
int* monte_acc_b;
cudaMalloc(&monte_acc_a,n*n*sizeof(int));
cudaMalloc(&monte_acc_b,n*n*sizeof(int));
dim3 gridsize(1,1);
dim3 blocksize(n,n);
curandState* devStates;
cudaMalloc( &devStates, n*n*sizeof( curandState ) );
setup_kernel <<< gridsize, blocksize >>> ( devStates, time(NULL) ,n);
int dx=w/n;
int dy=h/n;
int mdx=dx+w%n;
int mdy=dy+h%n;
srand(time(NULL));
int tdx= rand()%mdx;
int tdy= rand()%mdy;
cudaDeviceSynchronize();
monte_run <<< gridsize,blocksize >>>(devPtr,w,monte_acc_a,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
cudaDeviceSynchronize();
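// alternate between monte_acc_a and monte_acc_b so each sweep's per-site results are
// reduced into accPtr[step] before that buffer is reused by a later sweep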
for(int i=0;i<(steps/2)-1;i++){
reduce_dif <<< 1,n*n >>>(monte_acc_a,accPtr,2*i,n);
monte_run <<< gridsize,blocksize >>>(devPtr,w,monte_acc_b,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
cudaDeviceSynchronize();
reduce_dif <<< 1,n*n >>>(monte_acc_b,accPtr,2*i+1,n);
monte_run <<< gridsize,blocksize >>>(devPtr,w,monte_acc_a,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
cudaDeviceSynchronize();
}
reduce_dif <<< 1,n*n >>>(monte_acc_a,accPtr,steps-2,n);
monte_run <<< gridsize,blocksize >>>(devPtr,w,monte_acc_b,tdx,tdy,beta,devStates,dx,dy,n,w,h);
tdx= rand()%mdx;
tdy= rand()%mdy;
cudaDeviceSynchronize();
reduce_dif <<< 1,n*n >>>(monte_acc_b,accPtr,steps-1,n);
cudaMemcpy(hist,accPtr,accsize,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(devStates);
cudaFree(monte_acc_a);
cudaFree(monte_acc_b);
cudaFree(accPtr);
cudaFree(devPtr);
return hist;
}
int main(int argc, char **argv){
float t_min;
float t_max;
float t_steps;
int steps;
int * size_l;
int l;
size_l=(int*)malloc((MAX(1,argc-5))*sizeof(int));
if (argc>=6){
l=argc-5;
for(int i=5;i<argc;i++){
size_l[i-5]=atoi(argv[i]);
}
steps=atoi(argv[1]);
t_min=atof(argv[2]);
t_max=atof(argv[3]);
t_steps=atoi(argv[4]);
}else{
l=1;
steps=(100);
t_min=3;
t_max=5;
t_steps=5;
size_l[0]=4;
}
for(int i=0;i<l;i++){
int h=size_l[i];int w=size_l[i];
for(int t_step=0;t_step<t_steps;t_step++){
float t=t_min+t_step*(t_max-t_min)/(t_steps-1);
float beta=1/t;
int *res=run_sim(beta,w,h,steps);
FILE *fp;
char *fname=(char*)malloc(100*sizeof(char));
sprintf(fname,"data/ising_l_%d_t_%f.txt",w,t);
fp=fopen(fname,"w");
if(fp == NULL){
exit(-1);
}
for (int i=0;i<steps;i++){
fprintf(fp,"%d\n",res[i]);
}
fclose(fp);
free(fname);
free(res);
}
}
free(size_l);
return EXIT_SUCCESS;
}
|
20d11d00b5251fb37d75fdc397a10be64ae37d66.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/device_functions.h>
#include <helper_math.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <iostream>
#include <time.h>
#include <timer.h>
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#define width 1280 //screen width
#define height 720 //screen height
#define MAX_STEP 400
#define MAX_DIST 100.
#define PRECISION 0.01
#define OBJ_IN_SCENE 10
#define N_THREAD 16
#define TopColor make_float3( 0.35, 0.4, 0.8 )
#define MiddleColor make_float3( 0.8, 0.8, 0.8 )
#define BottomColor make_float3( 0.8, 0.4, 0.33 )
#define REFRESH_DELAY 10 //ms
double t = 0.0f; //timer
float3* device; //pointer to memory on the device (GPU VRAM)
GLuint buffer; //buffer
StopWatchInterface* timer = NULL;
float fpsCount = 0;
float fpsLimit = 1;
double startFrame;
double endFrame;
int frameCount = 0;
float FPS;
//-----------------------
struct RM {
float dist;
float travel;
};
struct PARAM
{
int movingCam;
int plane_in_scene;
int obj_in_scene;
int move;
};
struct Blob
{
int shape;//0 sphere, 1 torus, 2 tetrahedron
float3 o_position;
float3 position;
float3 color;
float size;
int oper; //union, subtraction, intersection
int morph;
float3 movement;
int isMoving;
};
__constant__ __device__ Blob blobs_device[10];
Blob blobs_host[10];
__constant__ __device__ PARAM param_device[1];
PARAM param_host[1];
//--------------------------------------------------------------- DEVICE
//################ BASIC FUNC
__device__ float mix(float a, float b, float x)
{
return a * (1 - x) + b * x;
}
__device__ float3 mix(float3 a, float3 b, float x)
{
float r = mix(a.x, b.x, x);
float g = mix(a.y, b.y, x);
float bb = mix(a.z, b.z, x);
return make_float3(r, g, bb);
}
__device__ float clamp_f(float x, float min_v, float max_v)
{
return min(max(x, min_v), max_v);
}
__device__ float3 abs(float3 vec)
{
float3 r;
r.x = vec.x * ((vec.x < 0) * (-1) + (vec.x > 0));
r.y = vec.y * ((vec.y < 0) * (-1) + (vec.y > 0));
r.z = vec.z * ((vec.z < 0) * (-1) + (vec.z > 0));
return r;
}
//################ OPERATORS
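// the blends below are the standard polynomial smooth-min / smooth-subtraction used with
// signed distance fields; k is the blending radius between the two shapes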
__device__ float smoothUnion(float d1, float d2, float k) {
float h = clamp_f(0.5 + 0.5 * (d2 - d1) / k, 0.0, 1.0);
return mix(d2, d1, h) - k * h * (1.0 - h);
}
__device__ float smoothSubtraction(float d1, float d2, float k) {
float h = clamp_f(0.5 - 0.5 * (d2 + d1) / k, 0.0, 1.0);
return mix(d2, -d1, h) + k * h * (1.0 - h);
}
__device__ float changeShape(float dist1, float dist2, float time) //remember this is the k value in raymarching or t in mapping
{
return mix(dist1, dist2, sin(time) * .5 + .5);
}
//################ DISTANCE FUCTIONS
__device__ float plane(float3 p, float3 c, float3 n) //plane signed distance field
{
return dot(p - c, n);
}
__device__ float floor(float3 pos)
{
return 2 + pos.y;
}
__device__ float sphere(float3 p, float3 sphere_position, float radius)
{
return length(p - sphere_position) - radius;
}
__device__ float torus(float3 rayPos, float3 pos, float rad) //a torus has two radii: the main ring radius and the tube radius
{
pos = rayPos - pos;
float2 radius = make_float2(rad, rad * 0.3);
float2 q = make_float2(length(make_float2(pos.x, pos.z)) - radius.x, pos.y);
return length(q) - radius.y;
}
__device__ float tetrahedron(float3 p, float3 pos, float e) //tetrahedron signed distance field, built from the intersection of four planes
{
p = pos - p;
float f = 0.57735;
float a = plane(p, make_float3(e, e, e), make_float3(-f, f, f));
float b = plane(p, make_float3(e, -e, -e), make_float3(f, -f, f));
float c = plane(p, make_float3(-e, e, -e), make_float3(f, f, -f));
float d = plane(p, make_float3(-e, -e, e), make_float3(-f, -f, -f));
return max(max(a, b), max(c, d));
}
__device__ float ShapeDistance(float3 pos, Blob blob, float t )
{
float3 blob_pos = blob.position;// +(make_float3(cos(t * blob.movement.x), cos(t * blob.movement.y), cos(t * blob.movement.z))) * (blob.isMoving * param_device[0].move); //if is moving == 1, else == 0 and there is no add
if (blob.shape == 0)
return sphere(pos, blob_pos, blob.size);
if (blob.shape == 1)
return torus(pos, blob_pos, blob.size);
if (blob.shape == 2)
return tetrahedron(pos, blob_pos, blob.size);
return 0.0;
}
__device__ float3 getColor(float3 pos, float time) {
float3 color = (max(0.0, 1.0 - floor(pos)) * make_float3(0.0, 0.4, 0.0) * 1.0 )* param_device[0].plane_in_scene;
for (int i = 0; i < OBJ_IN_SCENE; i++)
//if (blobs_device[i].oper != 1)
color += max(0.0, 1.0 - ShapeDistance(pos, blobs_device[i], time)) * blobs_device[i].color * 1.0;
return color;
}
//################ MAPPING SCENE
__device__ float map(float3 p, float t) //virtual geometry
{
float result;
result = 1e20;
if (param_device[0].plane_in_scene == 1)
result = floor(p);
//register is more efficient than constant
int move = param_device[0].movingCam;
float3 p_c;
p_c = make_float3(p.x, cos(t) * p.y + sin(t) * p.z, -sin(t) * p.y + cos(t) * p.z);
p_c = make_float3(cos(t) * p_c.x - sin(t) * p_c.z, p_c.y, sin(t) * p_c.x + cos(t) * p_c.z);
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//unusual statement to avoid an "if (move == 0) p = p_c" branch:
//add (p_c - p) to p (because we do not want to add p_c itself) and multiply that delta by move
//move is 1 when the camera moves, so p = p + (p_c - p) * 1 --> p = p_c
//move is 0 when it does not,   so p = p + (p_c - p) * 0 --> p = p
p = p + (p_c-p) * move ;
//for all shapes
//for(int i = 0; i < param_device[0].obj_in_scene; i++)
//shapes[i] = blobs_device[i].morph == 0 ? ShapeDistance(p, blobs[i]) : ChangeShape(blobs[i], p);
//For all unions operator
for (int i = 0; i < param_device[0].obj_in_scene; i++) {
if (blobs_device[i].oper == 0) //is union operator
result = smoothUnion(result, ShapeDistance(p, blobs_device[i], t), 0.5);
}
/*
for (int i = 0; i < OBJ_IN_SCENE; i++)
if (blobs_device[i].oper == 1) //is union operator
result = smoothSubtraction(ShapeDistance(p, blobs_device[i], t), result, 0.5);
*/
return result;
}
//################ RAYMARCH
__device__ RM raymarch(float3 ro, float3 rd, float time) //raymarching
{
float travel = 0.0;
float hit;
for (int i = 0; i < MAX_STEP; i++)
{
float3 point = ro + travel * rd;
hit = map(point, time);
travel += hit;
if (hit < PRECISION || travel > MAX_DIST) break;
}
RM result;
result.travel = travel;
//result.dist = hit;
return result;
}
//################ RENDERING POINT
__device__ float3 GetNormal(float3 point, float t)
{
float base = map(point, t);
float2 epsilon = make_float2(0.01, 0.0);
float3 normal = base - make_float3(
map(point - make_float3(0.01,0.0,0.0),t), //finite-difference gradient of the SDF: compare nearby points around the point where the normal is computed
map(point - make_float3(0.0, 0.01, 0.0),t),
map(point - make_float3(0.0, 0.0, 0.01),t));
return normalize(normal);
}
__device__ float3 render_point(float3 ro, float3 p, float t, float3 color) //directional derivative based lighting
{
float3 lightPosition = make_float3(0.0,5.0,-2.0);
float2 movLight = make_float2(sin(t * 0.5) * 4., cos(t * 0.5) * 4.0);
lightPosition += make_float3(movLight, 0.0);
float3 light = normalize(lightPosition - p);
float3 normal = GetNormal(p, t);
float3 finalColor = normal;
float3 toCamera = normalize(ro - p);
float shadowHit;
bool shadow;
//Shadow color
shadowHit = raymarch(p + (normal * PRECISION * 2.), light, t).travel;
shadow = shadowHit < length(p - lightPosition);
//__syncthreads() must not go here: threads that took the render_bg path would never reach it and the block could deadlock
//otherwise the block would need a sync, because the raymarching loop can leave threads misaligned
float diffuse = clamp(dot(normal, light), 0.0, 1.0); //clamp so the diffuse term never goes negative
float3 diffuseColor = diffuse * color;
float specular = diffuse;
float3 specularColor = diffuseColor;
if (!shadow)
{
float3 reflectedLight = normalize(reflect(-light, normal));
specular = pow((double)clamp(dot(reflectedLight, light), 0.0, 1.0), 5.0);
specular = min(diffuse, specular);
specularColor = specular * make_float3(1.0, 1.0, 1.0); //specular color 1,1,1
finalColor = clamp((diffuseColor + specularColor),0.0,1.0);
}
else finalColor = float3(diffuseColor) * 0.4;
return finalColor;
}
__device__ float3 render_bg(float2 uv)
{
float3 color = make_float3(0.0, 0.0,0.0);
if (uv.y > 0.0) color = mix(MiddleColor, TopColor, uv.y*2);
if (uv.y <= 0.0) color = mix(MiddleColor, BottomColor, uv.y * -2);
//color = make_float3(uv.x, 0.0, uv.y);
return color;
}
//################ RAYMARCHING MAIN
//++++++++++++++++++++ 3
__global__ void rendering(float3* output, float k)
{
//get coordinate of pixel
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int i = (height - y - 1) * width + x;
//if(x==0 && y==0)
//printf("\n\n[%d]-> isMoving: %d, color: %1.4f - %1.4f - %1.4f, position: %2.4f, %2.4f, %2.4f", 5, blobs_device[5].isMoving, blobs_device[5].color.x, blobs_device[5].color.y, blobs_device[5].color.z, blobs_device[5].position.x, blobs_device[5].position.y, blobs_device[5].position.z);
float2 resolution = make_float2((float)width, (float)height); //screen resolution
float2 coordinates = make_float2((float)x, (float)y); //fragment coordinates
//float2 uv = (2.0 * coordinates - resolution) / resolution.y;
float2 uv = coordinates / resolution;
uv -= 0.5;
uv.x *= resolution.x / resolution.y;
float3 ro = make_float3(0.0f, 0.0f, -20.0f); //ray origin
float3 rd = normalize(make_float3(uv, 1.0f)); //ray direction
RM raym = raymarch(ro, rd, k);
//_synchthreads();
float dist = raym.travel;
float3 point = ro + dist * rd;
float3 c;
if (dist > MAX_DIST) c = render_bg(uv);
else c = render_point(ro, point, k, getColor(point, k));
//else c = make_float3(dist, dist, dist);
float colour;
unsigned char bytes[] = { (unsigned char)(c.x * 255 + 0.5), (unsigned char)(c.y * 255 + 0.5), (unsigned char)(c.z * 255 + 0.5), 1 };
memcpy(&colour, &bytes, sizeof(colour)); //convert from 4 bytes to single float
output[i] = make_float3(x, y, colour);
}
//#################################### TIMER FUNCTIONS
void computeFPS()
{
frameCount++;
fpsCount++;
if (fpsCount == fpsLimit)
{
char fps[256];
float ifps = 1.0f / (sdkGetAverageTimerValue(&timer) / 1000.0f);
sprintf(fps, "fps: %3.f fps ", ifps);
glutSetWindowTitle(fps);
fpsCount = 0;
fpsLimit = (int)MAX(ifps, 1.0f);
sdkResetTimer(&timer);
}
}
void timerEvent(int value)
{
if (glutGetWindow())
{
glutPostRedisplay();
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
t += 0.01667777f;
}
}
//############################### DISPLAY LOOP
//++++++++++++++++++++ 2
void display(void)
{
sdkStartTimer(&timer);
hipDeviceSynchronize();
hipGLMapBufferObject__((void**)& device, buffer); //maps the buffer object into the address space of CUDA
glClear(GL_COLOR_BUFFER_BIT);
dim3 block(N_THREAD, N_THREAD, 1);
dim3 grid(width / block.x, height / block.y, 1);
rendering << < grid, block >> > (device, t); //execute kernel
hipDeviceSynchronize();
hipGLUnmapBufferObject(buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glVertexPointer(2, GL_FLOAT, 12, 0);
glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glDrawArrays(GL_POINTS, 0, width * height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
sdkStopTimer(&timer);
computeFPS();
}
//########################################################################################################### HOST
void changeSize(int w, int h)
{
//std::cout << "w " << w << " h " << h << std::endl;
glutReshapeWindow(width, height);
}
void keyboard(unsigned char key, int x, int y)
{
int i;
int* j = &i;
switch (key){
case 'f':
if (param_host[0].plane_in_scene == 0) {
std::cout << "add plane in scene" << std::endl;
param_host[0].plane_in_scene = 1;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
}
else {
std::cout << "remove plane from scene" << std::endl;
param_host[0].plane_in_scene = 0;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
}break;
case '+':
std::cout << "add object in scene (MAX 10 OBJ)" << std::endl;
param_host[0].obj_in_scene = param_host[0].obj_in_scene < 10 ? param_host[0].obj_in_scene +1 : param_host[0].obj_in_scene;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
break;
case '-':
std::cout << "remove object in scene (not negative)" << std::endl;
param_host[0].obj_in_scene = param_host[0].obj_in_scene > 0 ? param_host[0].obj_in_scene - 1 : param_host[0].obj_in_scene;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
break;
case 'm':
std::cout << "move camera" << std::endl;
param_host[0].movingCam = param_host[0].movingCam == 0 ? 1 : 0;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
break;
case 'a':
std::cout << "animate obj" << std::endl;
param_host[0].move = param_host[0].move == 0 ? 1 : 0;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
break;
case 'h':
std::cout << "\na: animate obj \nf: add floor or remove floor \n+: add object \n-:remove obj\n(obj in scene: " << param_host[0].obj_in_scene <<") \nm: move camera \nh: print help" << std::endl;
break;
}
glutPostRedisplay();
}
//++++++++++++++++++++ 1
int main(int argc, char** argv)
{
glutInit(&argc, argv); //OpenGL initializing
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
//creating window
glutInitWindowPosition(100, 100);
glutInitWindowSize(width, height);
glutCreateWindow("Basic CUDA OpenGL raymarching - tryy");
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0.0, width, 0.0, height);
glutDisplayFunc(display); //register the call back
sdkCreateTimer(&timer);
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
glutReshapeFunc(changeSize);
glutKeyboardFunc(keyboard);
glewInit();
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
unsigned int size = width * height * sizeof(float3);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
hipGLRegisterBufferObject(buffer); //register the buffer object for access by CUDA
srand(time(NULL));
for (int i = 0; i < OBJ_IN_SCENE; i++)
{
//SETTING SCENE of 10 obj with random properties
float size = i;
float x = size;
float y = x / 2;
float z = y / 2;
x = int(x) % 2 == 0 ? 3.0 : -3.0;
y = (int)y % 2 == 0 ? 3.0 : -3.0;
z = (int)z % 2 == 0 ? 3.0 : -3.0;
if (size > 7) {
x = z = 0.0;
y = (int)size % 2;
}
Blob newObject;
float k = ((float)rand() / RAND_MAX) + 0.50;
//---------Pos
newObject.position = make_float3(x + k, y + k, z + k);
//---------Size
newObject.size = k;
//---------Color
newObject.color = make_float3((float)rand() / RAND_MAX, (float)rand() / RAND_MAX, (float)rand() / RAND_MAX);
//---------Shape
newObject.shape = rand() % 3; //0 sphere, 1 torus, 2 tetrahedron
//---------Movement
newObject.movement = make_float3(rand() % 5, rand() % 5, rand() % 5);
newObject.isMoving = rand() % 2;
//---------Oper
newObject.oper = 0;//rand() % 2; //0 union, 1 subtraction
//---------Morph
newObject.morph = rand() % 2; //1 true, 0 false
blobs_host[i] = newObject;
}
for (int i = 0; i < OBJ_IN_SCENE; i++)
printf("\n[%d]: Shape: %d | position: %2.4f - %2.4f - %2.4f\t| color: %1.4f - %1.4f - %1.4f | size: %1.4f | oper: %d | morph: %d | isMoving: %d | movement: %2.4f - %2.4f - %2.4f ", i, blobs_host[i].shape, blobs_host[i].position.x, blobs_host[i].position.y, blobs_host[i].position.z, blobs_host[i].color.x, blobs_host[i].color.y, blobs_host[i].color.z, blobs_host[i].size, blobs_host[i].oper, blobs_host[i].morph, blobs_host[i].isMoving, blobs_host[i].movement.x, blobs_host[i].movement.y, blobs_host[i].movement.z);
param_host[0].movingCam = 0;
param_host[0].obj_in_scene = 10;
param_host[0].plane_in_scene = 0;
param_host[0].move = 0;
hipMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(blobs_device, blobs_host, sizeof(struct Blob) * 10, 0, hipMemcpyHostToDevice);
hipMalloc(&device, width * height * sizeof(float3)); //allocate memory on the GPU VRAM
std::cout << "\na: animate obj \nf: add floor or remove floor \n+: add object \n-:remove obj\n(obj in scene: " << param_host[0].obj_in_scene << ") \nm: move camera \nh: print help" << std::endl;
glutMainLoop(); //event processing loop
hipFree(device);
}
|
20d11d00b5251fb37d75fdc397a10be64ae37d66.cu
|
#include <device_functions.h>
#include <helper_math.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <iostream>
#include <time.h>
#include <timer.h>
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#define width 1280 //screen width
#define height 720 //screen height
#define MAX_STEP 400
#define MAX_DIST 100.
#define PRECISION 0.01
#define OBJ_IN_SCENE 10
#define N_THREAD 16
#define TopColor make_float3( 0.35, 0.4, 0.8 )
#define MiddleColor make_float3( 0.8, 0.8, 0.8 )
#define BottomColor make_float3( 0.8, 0.4, 0.33 )
#define REFRESH_DELAY 10 //ms
double t = 0.0f; //timer
float3* device; //pointer to memory on the device (GPU VRAM)
GLuint buffer; //buffer
StopWatchInterface* timer = NULL;
float fpsCount = 0;
float fpsLimit = 1;
double startFrame;
double endFrame;
int frameCount = 0;
float FPS;
//-----------------------
struct RM {
float dist;
float travel;
};
struct PARAM
{
int movingCam;
int plane_in_scene;
int obj_in_scene;
int move;
};
struct Blob
{
int shape;//0 sphere, 1 torus, 2 tetrahedron
float3 o_position;
float3 position;
float3 color;
float size;
int oper; //union, subtraction, intersection
int morph;
float3 movement;
int isMoving;
};
__constant__ __device__ Blob blobs_device[10];
Blob blobs_host[10];
__constant__ __device__ PARAM param_device[1];
PARAM param_host[1];
//--------------------------------------------------------------- DEVICE
//################ BASIC FUNC
__device__ float mix(float a, float b, float x)
{
return a * (1 - x) + b * x;
}
__device__ float3 mix(float3 a, float3 b, float x)
{
float r = mix(a.x, b.x, x);
float g = mix(a.y, b.y, x);
float bb = mix(a.z, b.z, x);
return make_float3(r, g, bb);
}
__device__ float clamp_f(float x, float min_v, float max_v)
{
return min(max(x, min_v), max_v);
}
__device__ float3 abs(float3 vec)
{
float3 r;
r.x = vec.x * ((vec.x < 0) * (-1) + (vec.x > 0));
r.y = vec.y * ((vec.y < 0) * (-1) + (vec.y > 0));
r.z = vec.z * ((vec.z < 0) * (-1) + (vec.z > 0));
return r;
}
//################ OPERATORS
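// the blends below are the standard polynomial smooth-min / smooth-subtraction used with
// signed distance fields; k is the blending radius between the two shapes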
__device__ float smoothUnion(float d1, float d2, float k) {
float h = clamp_f(0.5 + 0.5 * (d2 - d1) / k, 0.0, 1.0);
return mix(d2, d1, h) - k * h * (1.0 - h);
}
__device__ float smoothSubtraction(float d1, float d2, float k) {
float h = clamp_f(0.5 - 0.5 * (d2 + d1) / k, 0.0, 1.0);
return mix(d2, -d1, h) + k * h * (1.0 - h);
}
__device__ float changeShape(float dist1, float dist2, float time) //remember this is the k value in raymarching or t in mapping
{
return mix(dist1, dist2, sin(time) * .5 + .5);
}
//################ DISTANCE FUCTIONS
__device__ float plane(float3 p, float3 c, float3 n) //plane signed distance field
{
return dot(p - c, n);
}
__device__ float floor(float3 pos)
{
return 2 + pos.y;
}
__device__ float sphere(float3 p, float3 sphere_position, float radius)
{
return length(p - sphere_position) - radius;
}
__device__ float torus(float3 rayPos, float3 pos, float rad) //a torus has two radii: the main ring radius and the tube radius
{
pos = rayPos - pos;
float2 radius = make_float2(rad, rad * 0.3);
float2 q = make_float2(length(make_float2(pos.x, pos.z)) - radius.x, pos.y);
return length(q) - radius.y;
}
__device__ float tetrahedron(float3 p, float3 pos, float e) //tetrahedron signed distance field, built from the intersection of four planes
{
p = pos - p;
float f = 0.57735;
float a = plane(p, make_float3(e, e, e), make_float3(-f, f, f));
float b = plane(p, make_float3(e, -e, -e), make_float3(f, -f, f));
float c = plane(p, make_float3(-e, e, -e), make_float3(f, f, -f));
float d = plane(p, make_float3(-e, -e, e), make_float3(-f, -f, -f));
return max(max(a, b), max(c, d));
}
__device__ float ShapeDistance(float3 pos, Blob blob, float t )
{
float3 blob_pos = blob.position;// +(make_float3(cos(t * blob.movement.x), cos(t * blob.movement.y), cos(t * blob.movement.z))) * (blob.isMoving * param_device[0].move); //if is moving == 1, else == 0 and there is no add
if (blob.shape == 0)
return sphere(pos, blob_pos, blob.size);
if (blob.shape == 1)
return torus(pos, blob_pos, blob.size);
if (blob.shape == 2)
return tetrahedron(pos, blob_pos, blob.size);
return 0.0;
}
__device__ float3 getColor(float3 pos, float time) {
float3 color = (max(0.0, 1.0 - floor(pos)) * make_float3(0.0, 0.4, 0.0) * 1.0 )* param_device[0].plane_in_scene;
for (int i = 0; i < OBJ_IN_SCENE; i++)
//if (blobs_device[i].oper != 1)
color += max(0.0, 1.0 - ShapeDistance(pos, blobs_device[i], time)) * blobs_device[i].color * 1.0;
return color;
}
//################ MAPPING SCENE
__device__ float map(float3 p, float t) //virtual geometry
{
float result;
result = 1e20;
if (param_device[0].plane_in_scene == 1)
result = floor(p);
//register is more efficient than constant
int move = param_device[0].movingCam;
float3 p_c;
p_c = make_float3(p.x, cos(t) * p.y + sin(t) * p.z, -sin(t) * p.y + cos(t) * p.z);
p_c = make_float3(cos(t) * p_c.x - sin(t) * p_c.z, p_c.y, sin(t) * p_c.x + cos(t) * p_c.z);
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//unusual statement to avoid an "if (move == 0) p = p_c" branch:
//add (p_c - p) to p (because we do not want to add p_c itself) and multiply that delta by move
//move is 1 when the camera moves, so p = p + (p_c - p) * 1 --> p = p_c
//move is 0 when it does not,   so p = p + (p_c - p) * 0 --> p = p
p = p + (p_c-p) * move ;
//for all shapes
//for(int i = 0; i < param_device[0].obj_in_scene; i++)
//shapes[i] = blobs_device[i].morph == 0 ? ShapeDistance(p, blobs[i]) : ChangeShape(blobs[i], p);
//For all unions operator
for (int i = 0; i < param_device[0].obj_in_scene; i++) {
if (blobs_device[i].oper == 0) //is union operator
result = smoothUnion(result, ShapeDistance(p, blobs_device[i], t), 0.5);
}
/*
for (int i = 0; i < OBJ_IN_SCENE; i++)
if (blobs_device[i].oper == 1) //is union operator
result = smoothSubtraction(ShapeDistance(p, blobs_device[i], t), result, 0.5);
*/
return result;
}
//################ RAYMARCH
__device__ RM raymarch(float3 ro, float3 rd, float time) //raymarching
{
float travel = 0.0;
float hit;
for (int i = 0; i < MAX_STEP; i++)
{
float3 point = ro + travel * rd;
hit = map(point, time);
travel += hit;
if (hit < PRECISION || travel > MAX_DIST) break;
}
RM result;
result.travel = travel;
//result.dist = hit;
return result;
}
//################ RENDERING POINT
__device__ float3 GetNormal(float3 point, float t)
{
float base = map(point, t);
float2 epsilon = make_float2(0.01, 0.0);
float3 normal = base - make_float3(
map(point - make_float3(0.01,0.0,0.0),t), //finite-difference gradient of the SDF: compare nearby points around the point where the normal is computed
map(point - make_float3(0.0, 0.01, 0.0),t),
map(point - make_float3(0.0, 0.0, 0.01),t));
return normalize(normal);
}
__device__ float3 render_point(float3 ro, float3 p, float t, float3 color) //directional derivative based lighting
{
float3 lightPosition = make_float3(0.0,5.0,-2.0);
float2 movLight = make_float2(sin(t * 0.5) * 4., cos(t * 0.5) * 4.0);
lightPosition += make_float3(movLight, 0.0);
float3 light = normalize(lightPosition - p);
float3 normal = GetNormal(p, t);
float3 finalColor = normal;
float3 toCamera = normalize(ro - p);
float shadowHit;
bool shadow;
//Shadow color
shadowHit = raymarch(p + (normal * PRECISION * 2.), light, t).travel;
shadow = shadowHit < length(p - lightPosition);
//__syncthreads() must not go here: threads that took the render_bg path would never reach it and the block could deadlock
//otherwise the block would need a sync, because the raymarching loop can leave threads misaligned
float diffuse = clamp(dot(normal, light), 0.0, 1.0); //clamp so the diffuse term never goes negative
float3 diffuseColor = diffuse * color;
float specular = diffuse;
float3 specularColor = diffuseColor;
if (!shadow)
{
float3 reflectedLight = normalize(reflect(-light, normal));
specular = pow((double)clamp(dot(reflectedLight, light), 0.0, 1.0), 5.0);
specular = min(diffuse, specular);
specularColor = specular * make_float3(1.0, 1.0, 1.0); //specular color 1,1,1
finalColor = clamp((diffuseColor + specularColor),0.0,1.0);
}
else finalColor = float3(diffuseColor) * 0.4;
return finalColor;
}
__device__ float3 render_bg(float2 uv)
{
float3 color = make_float3(0.0, 0.0,0.0);
if (uv.y > 0.0) color = mix(MiddleColor, TopColor, uv.y*2);
if (uv.y <= 0.0) color = mix(MiddleColor, BottomColor, uv.y * -2);
//color = make_float3(uv.x, 0.0, uv.y);
return color;
}
//################ RAYMARCHING MAIN
//++++++++++++++++++++ 3
__global__ void rendering(float3* output, float k)
{
//get coordinate of pixel
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int i = (height - y - 1) * width + x;
//if(x==0 && y==0)
//printf("\n\n[%d]-> isMoving: %d, color: %1.4f - %1.4f - %1.4f, position: %2.4f, %2.4f, %2.4f", 5, blobs_device[5].isMoving, blobs_device[5].color.x, blobs_device[5].color.y, blobs_device[5].color.z, blobs_device[5].position.x, blobs_device[5].position.y, blobs_device[5].position.z);
float2 resolution = make_float2((float)width, (float)height); //screen resolution
float2 coordinates = make_float2((float)x, (float)y); //fragment coordinates
//float2 uv = (2.0 * coordinates - resolution) / resolution.y;
float2 uv = coordinates / resolution;
uv -= 0.5;
uv.x *= resolution.x / resolution.y;
float3 ro = make_float3(0.0f, 0.0f, -20.0f); //ray origin
float3 rd = normalize(make_float3(uv, 1.0f)); //ray direction
RM raym = raymarch(ro, rd, k);
//_synchthreads();
float dist = raym.travel;
float3 point = ro + dist * rd;
float3 c;
if (dist > MAX_DIST) c = render_bg(uv);
else c = render_point(ro, point, k, getColor(point, k));
//else c = make_float3(dist, dist, dist);
float colour;
unsigned char bytes[] = { (unsigned char)(c.x * 255 + 0.5), (unsigned char)(c.y * 255 + 0.5), (unsigned char)(c.z * 255 + 0.5), 1 };
memcpy(&colour, &bytes, sizeof(colour)); //convert from 4 bytes to single float
output[i] = make_float3(x, y, colour);
}
//#################################### TIMER FUNCTIONS
void computeFPS()
{
frameCount++;
fpsCount++;
if (fpsCount == fpsLimit)
{
char fps[256];
float ifps = 1.0f / (sdkGetAverageTimerValue(&timer) / 1000.0f);
sprintf(fps, "fps: %3.f fps ", ifps);
glutSetWindowTitle(fps);
fpsCount = 0;
fpsLimit = (int)MAX(ifps, 1.0f);
sdkResetTimer(&timer);
}
}
void timerEvent(int value)
{
if (glutGetWindow())
{
glutPostRedisplay();
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
t += 0.01667777f;
}
}
//############################### DISPLAY LOOP
//++++++++++++++++++++ 2
void display(void)
{
sdkStartTimer(&timer);
cudaThreadSynchronize();
cudaGLMapBufferObject((void**)& device, buffer); //maps the buffer object into the address space of CUDA
glClear(GL_COLOR_BUFFER_BIT);
dim3 block(N_THREAD, N_THREAD, 1);
dim3 grid(width / block.x, height / block.y, 1);
rendering << < grid, block >> > (device, t); //execute kernel
cudaThreadSynchronize();
cudaGLUnmapBufferObject(buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glVertexPointer(2, GL_FLOAT, 12, 0);
glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glDrawArrays(GL_POINTS, 0, width * height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
sdkStopTimer(&timer);
computeFPS();
}
//########################################################################################################### HOST
void changeSize(int w, int h)
{
//std::cout << "w " << w << " h " << h << std::endl;
glutReshapeWindow(width, height);
}
void keyboard(unsigned char key, int x, int y)
{
int i;
int* j = &i;
switch (key){
case 'f':
if (param_host[0].plane_in_scene == 0) {
std::cout << "add plane in scene" << std::endl;
param_host[0].plane_in_scene = 1;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
}
else {
std::cout << "remove plane from scene" << std::endl;
param_host[0].plane_in_scene = 0;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
}break;
case '+':
std::cout << "add object in scene (MAX 10 OBJ)" << std::endl;
param_host[0].obj_in_scene = param_host[0].obj_in_scene < 10 ? param_host[0].obj_in_scene +1 : param_host[0].obj_in_scene;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
break;
case '-':
std::cout << "remove object in scene (not negative)" << std::endl;
param_host[0].obj_in_scene = param_host[0].obj_in_scene > 0 ? param_host[0].obj_in_scene - 1 : param_host[0].obj_in_scene;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
break;
case 'm':
std::cout << "move camera" << std::endl;
param_host[0].movingCam = param_host[0].movingCam == 0 ? 1 : 0;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
break;
case 'a':
std::cout << "animate obj" << std::endl;
param_host[0].move = param_host[0].move == 0 ? 1 : 0;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
break;
case 'h':
std::cout << "\na: animate obj \nf: add floor or remove floor \n+: add object \n-:remove obj\n(obj in scene: " << param_host[0].obj_in_scene <<") \nm: move camera \nh: print help" << std::endl;
break;
}
glutPostRedisplay();
}
//++++++++++++++++++++ 1
int main(int argc, char** argv)
{
glutInit(&argc, argv); //OpenGL initializing
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
//creating window
glutInitWindowPosition(100, 100);
glutInitWindowSize(width, height);
glutCreateWindow("Basic CUDA OpenGL raymarching - tryy");
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0.0, width, 0.0, height);
glutDisplayFunc(display); //register the call back
sdkCreateTimer(&timer);
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
glutReshapeFunc(changeSize);
glutKeyboardFunc(keyboard);
glewInit();
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
unsigned int size = width * height * sizeof(float3);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaGLRegisterBufferObject(buffer); //register the buffer object for access by CUDA
srand(time(NULL));
for (int i = 0; i < OBJ_IN_SCENE; i++)
{
//SETTING SCENE of 10 obj with random properties
float size = i;
float x = size;
float y = x / 2;
float z = y / 2;
x = int(x) % 2 == 0 ? 3.0 : -3.0;
y = (int)y % 2 == 0 ? 3.0 : -3.0;
z = (int)z % 2 == 0 ? 3.0 : -3.0;
if (size > 7) {
x = z = 0.0;
y = (int)size % 2;
}
Blob newObject;
float k = ((float)rand() / RAND_MAX) + 0.50;
//---------Pos
newObject.position = make_float3(x + k, y + k, z + k);
//---------Size
newObject.size = k;
//---------Color
newObject.color = make_float3((float)rand() / RAND_MAX, (float)rand() / RAND_MAX, (float)rand() / RAND_MAX);
//---------Shape
newObject.shape = rand() % 3; //0 sphere, 1 torus, 2 tetrahedron
//---------Movement
newObject.movement = make_float3(rand() % 5, rand() % 5, rand() % 5);
newObject.isMoving = rand() % 2;
//---------Oper
newObject.oper = 0;//rand() % 2; //0 union, 1 subtraction
//---------Morph
newObject.morph = rand() % 2; //1 true, 0 false
blobs_host[i] = newObject;
}
for (int i = 0; i < OBJ_IN_SCENE; i++)
printf("\n[%d]: Shape: %d | position: %2.4f - %2.4f - %2.4f\t| color: %1.4f - %1.4f - %1.4f | size: %1.4f | oper: %d | morph: %d | isMoving: %d | movement: %2.4f - %2.4f - %2.4f ", i, blobs_host[i].shape, blobs_host[i].position.x, blobs_host[i].position.y, blobs_host[i].position.z, blobs_host[i].color.x, blobs_host[i].color.y, blobs_host[i].color.z, blobs_host[i].size, blobs_host[i].oper, blobs_host[i].morph, blobs_host[i].isMoving, blobs_host[i].movement.x, blobs_host[i].movement.y, blobs_host[i].movement.z);
param_host[0].movingCam = 0;
param_host[0].obj_in_scene = 10;
param_host[0].plane_in_scene = 0;
param_host[0].move = 0;
cudaMemcpyToSymbol(param_device, param_host, sizeof(struct PARAM), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(blobs_device, blobs_host, sizeof(struct Blob) * 10, 0, cudaMemcpyHostToDevice);
cudaMalloc(&device, width * height * sizeof(float3)); //allocate memory on the GPU VRAM
std::cout << "\na: animate obj \nf: add floor or remove floor \n+: add object \n-:remove obj\n(obj in scene: " << param_host[0].obj_in_scene << ") \nm: move camera \nh: print help" << std::endl;
glutMainLoop(); //event processing loop
cudaFree(device);
}
|
29fc7e352abf73d142fa622911cdfaa4345a493b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_accelerate_kernel [14][2];
static int dims_accelerate_kernel_h [14][2] = {0};
//user function
__device__
void accelerate_kernel_gpu(const ACC<double> &density0,
const ACC<double> &volume,
ACC<double> &stepbymass,
const ACC<double> &xvel0,
ACC<double> &xvel1,
const ACC<double> &xarea,
const ACC<double> &pressure,
const ACC<double> &yvel0,
ACC<double> &yvel1,
const ACC<double> &yarea,
const ACC<double> &viscosity,
const ACC<double> &zvel0,
ACC<double> &zvel1,
const ACC<double> &zarea) {
double nodal_mass = 0.0;
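// nodal mass = average of the eight surrounding cell masses (density * volume), hence the 0.125 factor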
nodal_mass =(density0(-1,-1, 0) * volume(-1,-1, 0) +
density0( 0,-1, 0) * volume( 0,-1, 0) +
density0( 0, 0, 0) * volume( 0, 0, 0) +
density0(-1, 0, 0) * volume(-1, 0, 0) +
density0(-1,-1,-1) * volume(-1,-1,-1) +
density0( 0,-1,-1) * volume( 0,-1,-1) +
density0( 0, 0,-1) * volume( 0, 0,-1) +
density0(-1, 0,-1) * volume(-1, 0,-1)) * 0.125;
stepbymass(0,0,0) = 0.25*dt / nodal_mass;
xvel1(0,0,0) = xvel0(0,0,0) - stepbymass(0,0,0) *
( xarea(0,0,0) * ( pressure(0,0,0) - pressure(-1,0,0) ) +
xarea(0,-1,0) * ( pressure(0,-1,0) - pressure(-1,-1,0) ) +
xarea(0,0,-1) * ( pressure(0,0,-1) - pressure(-1,0,-1) ) +
xarea(0,-1,-1) * ( pressure(0,-1,-1) - pressure(-1,-1,-1) ) );
yvel1(0,0,0) = yvel0(0,0,0) - stepbymass(0,0,0) *
( yarea(0,0,0) * ( pressure(0,0,0) - pressure(0,-1,0) ) +
yarea(-1,0,0) * ( pressure(-1,0,0) - pressure(-1,-1,0) ) +
yarea(0,0,-1) * ( pressure(0,0,-1) - pressure(0,-1,-1) ) +
yarea(-1,0,-1)* ( pressure(-1,0,-1) - pressure(-1,-1,-1) ) );
zvel1(0,0,0) = zvel0(0,0,0) - stepbymass(0,0,0) *
( zarea(0,0,0) * ( pressure(0,0,0) - pressure(0,0,-1) ) +
zarea(0,-1,0) * ( pressure(0,-1,0) - pressure(0,-1,-1) ) +
zarea(-1,0,0) * ( pressure(-1,0,0) - pressure(-1,0,-1) ) +
zarea(-1,-1,0)* ( pressure(-1,-1,0) - pressure(-1,-1,-1) ) );
xvel1(0,0,0) = xvel1(0,0,0) - stepbymass(0,0,0) *
( xarea(0,0,0) * ( viscosity(0,0,0) - viscosity(-1,0,0) ) +
xarea(0,-1,0) * ( viscosity(0,-1,0) - viscosity(-1,-1,0) ) +
xarea(0,0,-1) * ( viscosity(0,0,-1) - viscosity(-1,0,-1) ) +
xarea(0,-1,-1)* ( viscosity(0,-1,-1) - viscosity(-1,-1,-1) ) );
yvel1(0,0,0) = yvel1(0,0,0) - stepbymass(0,0,0) *
( yarea(0,0,0) * ( viscosity(0,0,0) - viscosity(0,-1,0) ) +
yarea(-1,0,0) * ( viscosity(-1,0,0) - viscosity(-1,-1,0) ) +
yarea(0,0,-1) * ( viscosity(0,0,-1) - viscosity(0,-1,-1) ) +
yarea(-1,0,-1)* ( viscosity(-1,0,-1)- viscosity(-1,-1,-1) ) );
zvel1(0,0,0) = zvel1(0,0,0) - stepbymass(0,0,0) *
( zarea(0,0,0) * ( viscosity(0,0,0) - viscosity(0,0,-1) ) +
zarea(0,-1,0) * ( viscosity(0,-1,0) - viscosity(0,-1,-1) ) +
zarea(-1,0,0) * ( viscosity(-1,0,0) - viscosity(-1,0,-1) ) +
zarea(-1,-1,0)* ( viscosity(-1,-1,0)- viscosity(-1,-1,-1) ) );
}
__global__ void ops_accelerate_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
double* __restrict arg11,
double* __restrict arg12,
double* __restrict arg13,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[0][0] + idx_z * 1*1 * dims_accelerate_kernel[0][0] * dims_accelerate_kernel[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[1][0] + idx_z * 1*1 * dims_accelerate_kernel[1][0] * dims_accelerate_kernel[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[2][0] + idx_z * 1*1 * dims_accelerate_kernel[2][0] * dims_accelerate_kernel[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[3][0] + idx_z * 1*1 * dims_accelerate_kernel[3][0] * dims_accelerate_kernel[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[4][0] + idx_z * 1*1 * dims_accelerate_kernel[4][0] * dims_accelerate_kernel[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[5][0] + idx_z * 1*1 * dims_accelerate_kernel[5][0] * dims_accelerate_kernel[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[6][0] + idx_z * 1*1 * dims_accelerate_kernel[6][0] * dims_accelerate_kernel[6][1];
arg7 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[7][0] + idx_z * 1*1 * dims_accelerate_kernel[7][0] * dims_accelerate_kernel[7][1];
arg8 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[8][0] + idx_z * 1*1 * dims_accelerate_kernel[8][0] * dims_accelerate_kernel[8][1];
arg9 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[9][0] + idx_z * 1*1 * dims_accelerate_kernel[9][0] * dims_accelerate_kernel[9][1];
arg10 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[10][0] + idx_z * 1*1 * dims_accelerate_kernel[10][0] * dims_accelerate_kernel[10][1];
arg11 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[11][0] + idx_z * 1*1 * dims_accelerate_kernel[11][0] * dims_accelerate_kernel[11][1];
arg12 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[12][0] + idx_z * 1*1 * dims_accelerate_kernel[12][0] * dims_accelerate_kernel[12][1];
arg13 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[13][0] + idx_z * 1*1 * dims_accelerate_kernel[13][0] * dims_accelerate_kernel[13][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_accelerate_kernel[0][0], dims_accelerate_kernel[0][1], arg0);
const ACC<double> argp1(dims_accelerate_kernel[1][0], dims_accelerate_kernel[1][1], arg1);
ACC<double> argp2(dims_accelerate_kernel[2][0], dims_accelerate_kernel[2][1], arg2);
const ACC<double> argp3(dims_accelerate_kernel[3][0], dims_accelerate_kernel[3][1], arg3);
ACC<double> argp4(dims_accelerate_kernel[4][0], dims_accelerate_kernel[4][1], arg4);
const ACC<double> argp5(dims_accelerate_kernel[5][0], dims_accelerate_kernel[5][1], arg5);
const ACC<double> argp6(dims_accelerate_kernel[6][0], dims_accelerate_kernel[6][1], arg6);
const ACC<double> argp7(dims_accelerate_kernel[7][0], dims_accelerate_kernel[7][1], arg7);
ACC<double> argp8(dims_accelerate_kernel[8][0], dims_accelerate_kernel[8][1], arg8);
const ACC<double> argp9(dims_accelerate_kernel[9][0], dims_accelerate_kernel[9][1], arg9);
const ACC<double> argp10(dims_accelerate_kernel[10][0], dims_accelerate_kernel[10][1], arg10);
const ACC<double> argp11(dims_accelerate_kernel[11][0], dims_accelerate_kernel[11][1], arg11);
ACC<double> argp12(dims_accelerate_kernel[12][0], dims_accelerate_kernel[12][1], arg12);
const ACC<double> argp13(dims_accelerate_kernel[13][0], dims_accelerate_kernel[13][1], arg13);
accelerate_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, argp7, argp8,
argp9, argp10, argp11, argp12, argp13);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) {
#else
void ops_par_loop_accelerate_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
ops_arg arg12 = desc->args[12];
ops_arg arg13 = desc->args[13];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[14] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,14,range,104)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(104,"accelerate_kernel");
OPS_kernels[104].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 14,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
int xdim11 = args[11].dat->size[0];
int ydim11 = args[11].dat->size[1];
int xdim12 = args[12].dat->size[0];
int ydim12 = args[12].dat->size[1];
int xdim13 = args[13].dat->size[0];
int ydim13 = args[13].dat->size[1];
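// The per-dat x/y extents are cached host-side in dims_accelerate_kernel_h and re-copied to
// __constant__ memory below only when any of them has changed since the previous launch.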
if (xdim0 != dims_accelerate_kernel_h[0][0] || ydim0 != dims_accelerate_kernel_h[0][1] || xdim1 != dims_accelerate_kernel_h[1][0] || ydim1 != dims_accelerate_kernel_h[1][1] || xdim2 != dims_accelerate_kernel_h[2][0] || ydim2 != dims_accelerate_kernel_h[2][1] || xdim3 != dims_accelerate_kernel_h[3][0] || ydim3 != dims_accelerate_kernel_h[3][1] || xdim4 != dims_accelerate_kernel_h[4][0] || ydim4 != dims_accelerate_kernel_h[4][1] || xdim5 != dims_accelerate_kernel_h[5][0] || ydim5 != dims_accelerate_kernel_h[5][1] || xdim6 != dims_accelerate_kernel_h[6][0] || ydim6 != dims_accelerate_kernel_h[6][1] || xdim7 != dims_accelerate_kernel_h[7][0] || ydim7 != dims_accelerate_kernel_h[7][1] || xdim8 != dims_accelerate_kernel_h[8][0] || ydim8 != dims_accelerate_kernel_h[8][1] || xdim9 != dims_accelerate_kernel_h[9][0] || ydim9 != dims_accelerate_kernel_h[9][1] || xdim10 != dims_accelerate_kernel_h[10][0] || ydim10 != dims_accelerate_kernel_h[10][1] || xdim11 != dims_accelerate_kernel_h[11][0] || ydim11 != dims_accelerate_kernel_h[11][1] || xdim12 != dims_accelerate_kernel_h[12][0] || ydim12 != dims_accelerate_kernel_h[12][1] || xdim13 != dims_accelerate_kernel_h[13][0] || ydim13 != dims_accelerate_kernel_h[13][1]) {
dims_accelerate_kernel_h[0][0] = xdim0;
dims_accelerate_kernel_h[0][1] = ydim0;
dims_accelerate_kernel_h[1][0] = xdim1;
dims_accelerate_kernel_h[1][1] = ydim1;
dims_accelerate_kernel_h[2][0] = xdim2;
dims_accelerate_kernel_h[2][1] = ydim2;
dims_accelerate_kernel_h[3][0] = xdim3;
dims_accelerate_kernel_h[3][1] = ydim3;
dims_accelerate_kernel_h[4][0] = xdim4;
dims_accelerate_kernel_h[4][1] = ydim4;
dims_accelerate_kernel_h[5][0] = xdim5;
dims_accelerate_kernel_h[5][1] = ydim5;
dims_accelerate_kernel_h[6][0] = xdim6;
dims_accelerate_kernel_h[6][1] = ydim6;
dims_accelerate_kernel_h[7][0] = xdim7;
dims_accelerate_kernel_h[7][1] = ydim7;
dims_accelerate_kernel_h[8][0] = xdim8;
dims_accelerate_kernel_h[8][1] = ydim8;
dims_accelerate_kernel_h[9][0] = xdim9;
dims_accelerate_kernel_h[9][1] = ydim9;
dims_accelerate_kernel_h[10][0] = xdim10;
dims_accelerate_kernel_h[10][1] = ydim10;
dims_accelerate_kernel_h[11][0] = xdim11;
dims_accelerate_kernel_h[11][1] = ydim11;
dims_accelerate_kernel_h[12][0] = xdim12;
dims_accelerate_kernel_h[12][1] = ydim12;
dims_accelerate_kernel_h[13][0] = xdim13;
dims_accelerate_kernel_h[13][1] = ydim13;
cutilSafeCall(hipMemcpyToSymbol( dims_accelerate_kernel, dims_accelerate_kernel_h, sizeof(dims_accelerate_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size);
int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size);
int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size);
char *p_a[14];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
int base11 = args[11].dat->base_offset +
dat11 * 1 * (start[0] * args[11].stencil->stride[0]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
(start[1] * args[11].stencil->stride[1]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2]);
p_a[11] = (char *)args[11].data_d + base11;
int base12 = args[12].dat->base_offset +
dat12 * 1 * (start[0] * args[12].stencil->stride[0]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2]);
p_a[12] = (char *)args[12].data_d + base12;
int base13 = args[13].dat->base_offset +
dat13 * 1 * (start[0] * args[13].stencil->stride[0]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2]);
p_a[13] = (char *)args[13].data_d + base13;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 14);
ops_halo_exchanges(args,14,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[104].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_accelerate_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], (double *)p_a[11],
(double *)p_a[12], (double *)p_a[13],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[104].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 14);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[12],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[104].mpi_time += t2-t1;
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg13);
}
}
#ifdef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 104;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 104;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 14;
desc->args = (ops_arg*)malloc(14*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index;
desc->args[12] = arg12;
desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index;
desc->args[13] = arg13;
desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index;
desc->function = ops_par_loop_accelerate_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(104,"accelerate_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
29fc7e352abf73d142fa622911cdfaa4345a493b.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_accelerate_kernel [14][2];
static int dims_accelerate_kernel_h [14][2] = {0};
//user function
__device__
void accelerate_kernel_gpu(const ACC<double> &density0,
const ACC<double> &volume,
ACC<double> &stepbymass,
const ACC<double> &xvel0,
ACC<double> &xvel1,
const ACC<double> &xarea,
const ACC<double> &pressure,
const ACC<double> &yvel0,
ACC<double> &yvel1,
const ACC<double> &yarea,
const ACC<double> &viscosity,
const ACC<double> &zvel0,
ACC<double> &zvel1,
const ACC<double> &zarea) {
double nodal_mass = 0.0;
nodal_mass =(density0(-1,-1, 0) * volume(-1,-1, 0) +
density0( 0,-1, 0) * volume( 0,-1, 0) +
density0( 0, 0, 0) * volume( 0, 0, 0) +
density0(-1, 0, 0) * volume(-1, 0, 0) +
density0(-1,-1,-1) * volume(-1,-1,-1) +
density0( 0,-1,-1) * volume( 0,-1,-1) +
density0( 0, 0,-1) * volume( 0, 0,-1) +
density0(-1, 0,-1) * volume(-1, 0,-1)) * 0.125;
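// nodal_mass averages the masses (density * volume) of the eight cells sharing this node
// (hence the 0.125 = 1/8 factor); stepbymass below is the 0.25*dt / mass factor applied to
// the pressure and viscosity gradients when updating the velocities.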
stepbymass(0,0,0) = 0.25*dt / nodal_mass;
xvel1(0,0,0) = xvel0(0,0,0) - stepbymass(0,0,0) *
( xarea(0,0,0) * ( pressure(0,0,0) - pressure(-1,0,0) ) +
xarea(0,-1,0) * ( pressure(0,-1,0) - pressure(-1,-1,0) ) +
xarea(0,0,-1) * ( pressure(0,0,-1) - pressure(-1,0,-1) ) +
xarea(0,-1,-1) * ( pressure(0,-1,-1) - pressure(-1,-1,-1) ) );
yvel1(0,0,0) = yvel0(0,0,0) - stepbymass(0,0,0) *
( yarea(0,0,0) * ( pressure(0,0,0) - pressure(0,-1,0) ) +
yarea(-1,0,0) * ( pressure(-1,0,0) - pressure(-1,-1,0) ) +
yarea(0,0,-1) * ( pressure(0,0,-1) - pressure(0,-1,-1) ) +
yarea(-1,0,-1)* ( pressure(-1,0,-1) - pressure(-1,-1,-1) ) );
zvel1(0,0,0) = zvel0(0,0,0) - stepbymass(0,0,0) *
( zarea(0,0,0) * ( pressure(0,0,0) - pressure(0,0,-1) ) +
zarea(0,-1,0) * ( pressure(0,-1,0) - pressure(0,-1,-1) ) +
zarea(-1,0,0) * ( pressure(-1,0,0) - pressure(-1,0,-1) ) +
zarea(-1,-1,0)* ( pressure(-1,-1,0) - pressure(-1,-1,-1) ) );
xvel1(0,0,0) = xvel1(0,0,0) - stepbymass(0,0,0) *
( xarea(0,0,0) * ( viscosity(0,0,0) - viscosity(-1,0,0) ) +
xarea(0,-1,0) * ( viscosity(0,-1,0) - viscosity(-1,-1,0) ) +
xarea(0,0,-1) * ( viscosity(0,0,-1) - viscosity(-1,0,-1) ) +
xarea(0,-1,-1)* ( viscosity(0,-1,-1) - viscosity(-1,-1,-1) ) );
yvel1(0,0,0) = yvel1(0,0,0) - stepbymass(0,0,0) *
( yarea(0,0,0) * ( viscosity(0,0,0) - viscosity(0,-1,0) ) +
yarea(-1,0,0) * ( viscosity(-1,0,0) - viscosity(-1,-1,0) ) +
yarea(0,0,-1) * ( viscosity(0,0,-1) - viscosity(0,-1,-1) ) +
yarea(-1,0,-1)* ( viscosity(-1,0,-1)- viscosity(-1,-1,-1) ) );
zvel1(0,0,0) = zvel1(0,0,0) - stepbymass(0,0,0) *
( zarea(0,0,0) * ( viscosity(0,0,0) - viscosity(0,0,-1) ) +
zarea(0,-1,0) * ( viscosity(0,-1,0) - viscosity(0,-1,-1) ) +
zarea(-1,0,0) * ( viscosity(-1,0,0) - viscosity(-1,0,-1) ) +
zarea(-1,-1,0)* ( viscosity(-1,-1,0)- viscosity(-1,-1,-1) ) );
}
__global__ void ops_accelerate_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
double* __restrict arg11,
double* __restrict arg12,
double* __restrict arg13,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[0][0] + idx_z * 1*1 * dims_accelerate_kernel[0][0] * dims_accelerate_kernel[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[1][0] + idx_z * 1*1 * dims_accelerate_kernel[1][0] * dims_accelerate_kernel[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[2][0] + idx_z * 1*1 * dims_accelerate_kernel[2][0] * dims_accelerate_kernel[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[3][0] + idx_z * 1*1 * dims_accelerate_kernel[3][0] * dims_accelerate_kernel[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[4][0] + idx_z * 1*1 * dims_accelerate_kernel[4][0] * dims_accelerate_kernel[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[5][0] + idx_z * 1*1 * dims_accelerate_kernel[5][0] * dims_accelerate_kernel[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[6][0] + idx_z * 1*1 * dims_accelerate_kernel[6][0] * dims_accelerate_kernel[6][1];
arg7 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[7][0] + idx_z * 1*1 * dims_accelerate_kernel[7][0] * dims_accelerate_kernel[7][1];
arg8 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[8][0] + idx_z * 1*1 * dims_accelerate_kernel[8][0] * dims_accelerate_kernel[8][1];
arg9 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[9][0] + idx_z * 1*1 * dims_accelerate_kernel[9][0] * dims_accelerate_kernel[9][1];
arg10 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[10][0] + idx_z * 1*1 * dims_accelerate_kernel[10][0] * dims_accelerate_kernel[10][1];
arg11 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[11][0] + idx_z * 1*1 * dims_accelerate_kernel[11][0] * dims_accelerate_kernel[11][1];
arg12 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[12][0] + idx_z * 1*1 * dims_accelerate_kernel[12][0] * dims_accelerate_kernel[12][1];
arg13 += idx_x * 1*1 + idx_y * 1*1 * dims_accelerate_kernel[13][0] + idx_z * 1*1 * dims_accelerate_kernel[13][0] * dims_accelerate_kernel[13][1];
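// Each argN pointer now addresses element (idx_x, idx_y, idx_z) of its dat, using the
// flattened padded layout: offset = x + y*xdim + z*xdim*ydim.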
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_accelerate_kernel[0][0], dims_accelerate_kernel[0][1], arg0);
const ACC<double> argp1(dims_accelerate_kernel[1][0], dims_accelerate_kernel[1][1], arg1);
ACC<double> argp2(dims_accelerate_kernel[2][0], dims_accelerate_kernel[2][1], arg2);
const ACC<double> argp3(dims_accelerate_kernel[3][0], dims_accelerate_kernel[3][1], arg3);
ACC<double> argp4(dims_accelerate_kernel[4][0], dims_accelerate_kernel[4][1], arg4);
const ACC<double> argp5(dims_accelerate_kernel[5][0], dims_accelerate_kernel[5][1], arg5);
const ACC<double> argp6(dims_accelerate_kernel[6][0], dims_accelerate_kernel[6][1], arg6);
const ACC<double> argp7(dims_accelerate_kernel[7][0], dims_accelerate_kernel[7][1], arg7);
ACC<double> argp8(dims_accelerate_kernel[8][0], dims_accelerate_kernel[8][1], arg8);
const ACC<double> argp9(dims_accelerate_kernel[9][0], dims_accelerate_kernel[9][1], arg9);
const ACC<double> argp10(dims_accelerate_kernel[10][0], dims_accelerate_kernel[10][1], arg10);
const ACC<double> argp11(dims_accelerate_kernel[11][0], dims_accelerate_kernel[11][1], arg11);
ACC<double> argp12(dims_accelerate_kernel[12][0], dims_accelerate_kernel[12][1], arg12);
const ACC<double> argp13(dims_accelerate_kernel[13][0], dims_accelerate_kernel[13][1], arg13);
accelerate_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, argp7, argp8,
argp9, argp10, argp11, argp12, argp13);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) {
#else
void ops_par_loop_accelerate_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
ops_arg arg12 = desc->args[12];
ops_arg arg13 = desc->args[13];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[14] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,14,range,104)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(104,"accelerate_kernel");
OPS_kernels[104].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 14,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
int xdim11 = args[11].dat->size[0];
int ydim11 = args[11].dat->size[1];
int xdim12 = args[12].dat->size[0];
int ydim12 = args[12].dat->size[1];
int xdim13 = args[13].dat->size[0];
int ydim13 = args[13].dat->size[1];
if (xdim0 != dims_accelerate_kernel_h[0][0] || ydim0 != dims_accelerate_kernel_h[0][1] || xdim1 != dims_accelerate_kernel_h[1][0] || ydim1 != dims_accelerate_kernel_h[1][1] || xdim2 != dims_accelerate_kernel_h[2][0] || ydim2 != dims_accelerate_kernel_h[2][1] || xdim3 != dims_accelerate_kernel_h[3][0] || ydim3 != dims_accelerate_kernel_h[3][1] || xdim4 != dims_accelerate_kernel_h[4][0] || ydim4 != dims_accelerate_kernel_h[4][1] || xdim5 != dims_accelerate_kernel_h[5][0] || ydim5 != dims_accelerate_kernel_h[5][1] || xdim6 != dims_accelerate_kernel_h[6][0] || ydim6 != dims_accelerate_kernel_h[6][1] || xdim7 != dims_accelerate_kernel_h[7][0] || ydim7 != dims_accelerate_kernel_h[7][1] || xdim8 != dims_accelerate_kernel_h[8][0] || ydim8 != dims_accelerate_kernel_h[8][1] || xdim9 != dims_accelerate_kernel_h[9][0] || ydim9 != dims_accelerate_kernel_h[9][1] || xdim10 != dims_accelerate_kernel_h[10][0] || ydim10 != dims_accelerate_kernel_h[10][1] || xdim11 != dims_accelerate_kernel_h[11][0] || ydim11 != dims_accelerate_kernel_h[11][1] || xdim12 != dims_accelerate_kernel_h[12][0] || ydim12 != dims_accelerate_kernel_h[12][1] || xdim13 != dims_accelerate_kernel_h[13][0] || ydim13 != dims_accelerate_kernel_h[13][1]) {
dims_accelerate_kernel_h[0][0] = xdim0;
dims_accelerate_kernel_h[0][1] = ydim0;
dims_accelerate_kernel_h[1][0] = xdim1;
dims_accelerate_kernel_h[1][1] = ydim1;
dims_accelerate_kernel_h[2][0] = xdim2;
dims_accelerate_kernel_h[2][1] = ydim2;
dims_accelerate_kernel_h[3][0] = xdim3;
dims_accelerate_kernel_h[3][1] = ydim3;
dims_accelerate_kernel_h[4][0] = xdim4;
dims_accelerate_kernel_h[4][1] = ydim4;
dims_accelerate_kernel_h[5][0] = xdim5;
dims_accelerate_kernel_h[5][1] = ydim5;
dims_accelerate_kernel_h[6][0] = xdim6;
dims_accelerate_kernel_h[6][1] = ydim6;
dims_accelerate_kernel_h[7][0] = xdim7;
dims_accelerate_kernel_h[7][1] = ydim7;
dims_accelerate_kernel_h[8][0] = xdim8;
dims_accelerate_kernel_h[8][1] = ydim8;
dims_accelerate_kernel_h[9][0] = xdim9;
dims_accelerate_kernel_h[9][1] = ydim9;
dims_accelerate_kernel_h[10][0] = xdim10;
dims_accelerate_kernel_h[10][1] = ydim10;
dims_accelerate_kernel_h[11][0] = xdim11;
dims_accelerate_kernel_h[11][1] = ydim11;
dims_accelerate_kernel_h[12][0] = xdim12;
dims_accelerate_kernel_h[12][1] = ydim12;
dims_accelerate_kernel_h[13][0] = xdim13;
dims_accelerate_kernel_h[13][1] = ydim13;
cutilSafeCall(cudaMemcpyToSymbol( dims_accelerate_kernel, dims_accelerate_kernel_h, sizeof(dims_accelerate_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
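// Round the launch grid up so the whole (x_size, y_size, z_size) range is covered;
// out-of-range threads simply skip the body via the bounds check inside the kernel.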
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size);
int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size);
int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size);
char *p_a[14];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
int base11 = args[11].dat->base_offset +
dat11 * 1 * (start[0] * args[11].stencil->stride[0]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
(start[1] * args[11].stencil->stride[1]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2]);
p_a[11] = (char *)args[11].data_d + base11;
int base12 = args[12].dat->base_offset +
dat12 * 1 * (start[0] * args[12].stencil->stride[0]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2]);
p_a[12] = (char *)args[12].data_d + base12;
int base13 = args[13].dat->base_offset +
dat13 * 1 * (start[0] * args[13].stencil->stride[0]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2]);
p_a[13] = (char *)args[13].data_d + base13;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 14);
ops_halo_exchanges(args,14,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[104].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_accelerate_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], (double *)p_a[11],
(double *)p_a[12], (double *)p_a[13],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[104].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 14);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[12],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[104].mpi_time += t2-t1;
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg13);
}
}
#ifdef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 104;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 104;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 14;
desc->args = (ops_arg*)malloc(14*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index;
desc->args[12] = arg12;
desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index;
desc->args[13] = arg13;
desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index;
desc->function = ops_par_loop_accelerate_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(104,"accelerate_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
b547ab678e51fa1b82c164a39c0a331ba73d1fa8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "sample.cuh"
namespace pbw
{
namespace cuda
{
namespace kernels
{
namespace random
{
__global__ void sample(const float* __restrict__ A, float* __restrict__ buffer, const int seed, const size_t size)
{
const size_t grid = blockDim.x * blockIdx.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (size_t index = grid; index < size; index += stride)
{
hiprandState_t state;
hiprand_init(seed + index, 0, 0, &state);
float u = hiprand_uniform(&state);
buffer[index] = powf(u, 1 / A[index]);
}
}
__global__ void sample(const double* __restrict__ A, double* __restrict__ buffer, const int seed, const size_t size)
{
const size_t grid = blockDim.x * blockIdx.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (size_t index = grid; index < size; index += stride)
{
hiprandState_t state;
hiprand_init(seed + index, 0, 0, &state);
double u = hiprand_uniform_double(&state); // double-precision generator, so the full mantissa is random
buffer[index] = pow(u, 1 / A[index]);
}
}
}
}
}
}
|
b547ab678e51fa1b82c164a39c0a331ba73d1fa8.cu
|
#pragma once
#include "sample.cuh"
namespace pbw
{
namespace cuda
{
namespace kernels
{
namespace random
{
__global__ void sample(const float* __restrict__ A, float* __restrict__ buffer, const int seed, const size_t size)
{
const size_t grid = blockDim.x * blockIdx.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
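// Grid-stride loop: each thread handles indices grid, grid + stride, grid + 2*stride, ...
// so any launch configuration covers the full array.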
for (size_t index = grid; index < size; index += stride)
{
curandState_t state;
curand_init(seed + index, 0, 0, &state);
float u = curand_uniform(&state);
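// u^(1/A[index]) inverts the CDF F(x) = x^A on (0,1), i.e. this is an inverse-transform
// draw from the power-function (Beta(A,1)) distribution.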
buffer[index] = powf(u, 1 / A[index]);
}
}
__global__ void sample(const double* __restrict__ A, double* __restrict__ buffer, const int seed, const size_t size)
{
const size_t grid = blockDim.x * blockIdx.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (size_t index = grid; index < size; index += stride)
{
curandState_t state;
curand_init(seed + index, 0, 0, &state);
double u = curand_uniform_double(&state); // double-precision generator, so the full mantissa is random
buffer[index] = pow(u, 1 / A[index]);
}
}
}
}
}
}
|
cf6ab869825324ec721cf01d386fb97c3338762b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hip/hip_runtime.h"
#include <cstdio>
#include "ocuutil/timing_pool.h"
#include "ocuequation/solver.h"
namespace ocu {
Solver::~Solver()
{
}
Solver::Solver() {
}
bool Solver::PostKernelDim(const char *kernel_name, dim3 grid, dim3 block)
{
if (!_wrapper.PostKernelDim(kernel_name, grid, block)) {
add_error();
return false;
}
return true;
}
bool Solver::PostKernelDim(const char *kernel_name, dim3 grid, dim3 block, int resolution)
{
if (!_wrapper.PostKernelDim(kernel_name, grid, block, resolution)) {
add_error();
return false;
}
return true;
}
bool Solver::PostKernel(const char *kernel_name) {
if (!_wrapper.PostKernel(kernel_name)) {
add_error();
return false;
}
return true;
}
bool Solver::PostKernel(const char *kernel_name, int resolution) {
if (!_wrapper.PostKernel(kernel_name, resolution)) {
add_error();
return false;
}
return true;
}
} // end namespace
|
cf6ab869825324ec721cf01d386fb97c3338762b.cu
|
/*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda.h"
#include <cstdio>
#include "ocuutil/timing_pool.h"
#include "ocuequation/solver.h"
namespace ocu {
Solver::~Solver()
{
}
Solver::Solver() {
}
bool Solver::PostKernelDim(const char *kernel_name, dim3 grid, dim3 block)
{
if (!_wrapper.PostKernelDim(kernel_name, grid, block)) {
add_error();
return false;
}
return true;
}
bool Solver::PostKernelDim(const char *kernel_name, dim3 grid, dim3 block, int resolution)
{
if (!_wrapper.PostKernelDim(kernel_name, grid, block, resolution)) {
add_error();
return false;
}
return true;
}
bool Solver::PostKernel(const char *kernel_name) {
if (!_wrapper.PostKernel(kernel_name)) {
add_error();
return false;
}
return true;
}
bool Solver::PostKernel(const char *kernel_name, int resolution) {
if (!_wrapper.PostKernel(kernel_name, resolution)) {
add_error();
return false;
}
return true;
}
} // end namespace
|
706c45236fc811a145e9a8e3064aaee6cd8c5337.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/errors.h"
#include "common/cpu_bitmap.h"
#define DIM 1024
#define rnd(x) (x * rand() / RAND_MAX)
#define INF 2e10f
#define SPHERES 2000
#define MEMORY_OPTIM 0
struct Sphere {
float red, green, blue;
float radius;
float x, y, z;
__device__ float hit(float bitmapX, float bitmapY, float *colorFalloff) {
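// Orthographic hit test: view rays are parallel to the z axis, so the sphere is hit when the
// pixel's (x,y) offset lies inside the sphere's silhouette circle; the return value is the
// depth of the visible surface and *colorFalloff is a shading factor in (0,1].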
float distX = bitmapX - x;
float distY = bitmapY - y;
if (distX * distX + distY * distY < radius * radius) {
float distZ = sqrtf(radius * radius - distX * distX - distY * distY);
*colorFalloff = distZ / sqrtf(radius * radius);
return distZ + z;
}
return -INF;
}
};
__global__ void kernel(Sphere* spheres, unsigned char* bitmap) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float bitmapX = (x - DIM / 2);
float bitmapY = (y - DIM / 2);
float red = 0, green = 0, blue = 0;
float maxDepth = -INF;
for (int i = 0; i < SPHERES; i++) {
float colorFalloff;
float depth = spheres[i].hit(bitmapX, bitmapY, &colorFalloff);
if (depth > maxDepth) {
red = spheres[i].red * colorFalloff;
green = spheres[i].green * colorFalloff;
blue = spheres[i].blue * colorFalloff;
maxDepth = depth;
}
}
bitmap[offset * 4 + 0] = (int) (red * 255);
bitmap[offset * 4 + 1] = (int) (green * 255);
bitmap[offset * 4 + 2] = (int) (blue * 255);
bitmap[offset * 4 + 3] = 255;
}
__constant__ Sphere spheresContainer[SPHERES];
__global__ void kernelMemoryOptim(unsigned char* bitmap) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float bitmapX = (x - DIM / 2);
float bitmapY = (y - DIM / 2);
float red = 0, green = 0, blue = 0;
float maxDepth = -INF;
for (int i = 0; i < SPHERES; i++) {
float colorFalloff;
float depth = spheresContainer[i].hit(bitmapX, bitmapY, &colorFalloff);
if (depth > maxDepth) {
red = spheresContainer[i].red * colorFalloff;
green = spheresContainer[i].green * colorFalloff;
blue = spheresContainer[i].blue * colorFalloff;
maxDepth = depth;
}
}
bitmap[offset * 4 + 0] = (int) (red * 255);
bitmap[offset * 4 + 1] = (int) (green * 255);
bitmap[offset * 4 + 2] = (int) (blue * 255);
bitmap[offset * 4 + 3] = 255;
}
struct DataBlock {
unsigned char *hostBitmap;
Sphere *spheres;
};
int main(void) {
DataBlock data;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *devBitmap;
# if MEMORY_OPTIM == 0
Sphere *devSpheres;
HANDLE_ERROR(hipMalloc((void**)&devSpheres, sizeof(Sphere) * SPHERES));
#endif
HANDLE_ERROR(hipMalloc((void**)&devBitmap, bitmap.image_size()));
Sphere *hostSpheres = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i < SPHERES; i++) {
hostSpheres[i].red = rnd(1.0f);
hostSpheres[i].green = rnd(1.0f);
hostSpheres[i].blue = rnd(1.0f);
hostSpheres[i].x = rnd(1000.0f) - 500;
hostSpheres[i].y = rnd(1000.0f) - 500;
hostSpheres[i].z = rnd(1000.0f) - 500;
hostSpheres[i].radius = rnd(100.0f) + 20;
}
#if MEMORY_OPTIM == 0
HANDLE_ERROR(hipMemcpy(devSpheres, hostSpheres, sizeof(Sphere) * SPHERES, hipMemcpyHostToDevice));
#else
hipMemcpyToSymbol(spheresContainer, hostSpheres, sizeof(Sphere) * SPHERES);
#endif
free(hostSpheres);
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
#if MEMORY_OPTIM == 0
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, devSpheres, devBitmap);
#else
hipLaunchKernelGGL(( kernelMemoryOptim), dim3(grids),dim3(threads), 0, 0, devBitmap);
#endif
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
#if MEMORY_OPTIM == 0
printf("[Global memory] Time to generate: %3.1f ms\n", elapsedTime);
#else
printf("[Constant memory] Time to generate: %3.1f ms\n", elapsedTime);
#endif
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), devBitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
// bitmap.dump_ppm("image.ppm");
hipFree(devBitmap);
#if MEMORY_OPTIM == 0
hipFree(devSpheres);
#endif
}
|
706c45236fc811a145e9a8e3064aaee6cd8c5337.cu
|
#include "cuda.h"
#include "common/errors.h"
#include "common/cpu_bitmap.h"
#define DIM 1024
#define rnd(x) (x * rand() / RAND_MAX)
#define INF 2e10f
#define SPHERES 2000
#define MEMORY_OPTIM 0
struct Sphere {
float red, green, blue;
float radius;
float x, y, z;
__device__ float hit(float bitmapX, float bitmapY, float *colorFalloff) {
float distX = bitmapX - x;
float distY = bitmapY - y;
if (distX * distX + distY * distY < radius * radius) {
float distZ = sqrtf(radius * radius - distX * distX - distY * distY);
*colorFalloff = distZ / sqrtf(radius * radius);
return distZ + z;
}
return -INF;
}
};
__global__ void kernel(Sphere* spheres, unsigned char* bitmap) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float bitmapX = (x - DIM / 2);
float bitmapY = (y - DIM / 2);
float red = 0, green = 0, blue = 0;
float maxDepth = -INF;
for (int i = 0; i < SPHERES; i++) {
float colorFalloff;
float depth = spheres[i].hit(bitmapX, bitmapY, &colorFalloff);
if (depth > maxDepth) {
red = spheres[i].red * colorFalloff;
green = spheres[i].green * colorFalloff;
blue = spheres[i].blue * colorFalloff;
maxDepth = depth;
}
}
bitmap[offset * 4 + 0] = (int) (red * 255);
bitmap[offset * 4 + 1] = (int) (green * 255);
bitmap[offset * 4 + 2] = (int) (blue * 255);
bitmap[offset * 4 + 3] = 255;
}
__constant__ Sphere spheresContainer[SPHERES];
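// Constant-memory variant: on each loop iteration every thread in a warp reads the same
// sphere, so the accesses are broadcast from the constant cache instead of hitting global memory.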
__global__ void kernelMemoryOptim(unsigned char* bitmap) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float bitmapX = (x - DIM / 2);
float bitmapY = (y - DIM / 2);
float red = 0, green = 0, blue = 0;
float maxDepth = -INF;
for (int i = 0; i < SPHERES; i++) {
float colorFalloff;
float depth = spheresContainer[i].hit(bitmapX, bitmapY, &colorFalloff);
if (depth > maxDepth) {
red = spheresContainer[i].red * colorFalloff;
green = spheresContainer[i].green * colorFalloff;
blue = spheresContainer[i].blue * colorFalloff;
maxDepth = depth;
}
}
bitmap[offset * 4 + 0] = (int) (red * 255);
bitmap[offset * 4 + 1] = (int) (green * 255);
bitmap[offset * 4 + 2] = (int) (blue * 255);
bitmap[offset * 4 + 3] = 255;
}
struct DataBlock {
unsigned char *hostBitmap;
Sphere *spheres;
};
int main(void) {
DataBlock data;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *devBitmap;
# if MEMORY_OPTIM == 0
Sphere *devSpheres;
HANDLE_ERROR(cudaMalloc((void**)&devSpheres, sizeof(Sphere) * SPHERES));
#endif
HANDLE_ERROR(cudaMalloc((void**)&devBitmap, bitmap.image_size()));
Sphere *hostSpheres = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i < SPHERES; i++) {
hostSpheres[i].red = rnd(1.0f);
hostSpheres[i].green = rnd(1.0f);
hostSpheres[i].blue = rnd(1.0f);
hostSpheres[i].x = rnd(1000.0f) - 500;
hostSpheres[i].y = rnd(1000.0f) - 500;
hostSpheres[i].z = rnd(1000.0f) - 500;
hostSpheres[i].radius = rnd(100.0f) + 20;
}
#if MEMORY_OPTIM == 0
HANDLE_ERROR(cudaMemcpy(devSpheres, hostSpheres, sizeof(Sphere) * SPHERES, cudaMemcpyHostToDevice));
#else
cudaMemcpyToSymbol(spheresContainer, hostSpheres, sizeof(Sphere) * SPHERES);
#endif
free(hostSpheres);
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
#if MEMORY_OPTIM == 0
kernel<<<grids,threads>>>(devSpheres, devBitmap);
#else
kernelMemoryOptim<<<grids,threads>>>(devBitmap);
#endif
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
#if MEMORY_OPTIM == 0
printf("[Global memory] Time to generate: %3.1f ms\n", elapsedTime);
#else
printf("[Constant memory] Time to generate: %3.1f ms\n", elapsedTime);
#endif
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), devBitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
// bitmap.dump_ppm("image.ppm");
cudaFree(devBitmap);
#if MEMORY_OPTIM == 0
cudaFree(devSpheres);
#endif
}
|
c0c12b32320ee5ea20c028031b39c90f0183ff35.hip
|
// !!! This is a file automatically generated by hipify!!!
#define _POSIX_C_SOURCE 200809L
#define BILLION 1000000000L
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <time.h>
#include <stdbool.h>
#include <string.h>
#include <float.h>
#include <vector>
#include <limits.h>
#include "distance.h"
#include "guttman.h"
#include "matrix-read.h"
#include "temperature.h"
#include "stress.h"
#include "analysis.h"
#include "random.h"
// Host code
int main(int argc, char** argv) {
hipblasHandle_t handle;
hipblasCreate(&handle);
int blocks; //number of blocks
int threads; //number of threads per block
int m; // number of items / objects; aka 'N'
int n; // dimensions of high-dimensional space;
int s; // dimension of low-dimensional space; aka 'L'
double epsilon; // threshold for the stress variance; aka 'ε'
int k_max; // maximum number of iterations; aka 'MAX'
float temp_min; // minimum temperature before final run of smacof
float alpha; // temperature reduction factor
int iterations; // number of test runs for gathering average performance
bool track_median = false; // flag for tracking statistics from median solution
bool track_median_solution = false; // flag for tracking median solution
bool track_median_stresses = false; // flag for tracking stresses from median solution (initialized so the flags are defined when no tracking argument is passed)
float* matrix;
// validate arguments
if(argc > 11) {
fprintf(stderr, "\nToo Many Arguments\n");
return 1;
} else if(argc < 10) {
fprintf(stderr, "\nToo Few Arguments\n");
return 1;
}
if (argc > 10) {
track_median = (strncmp(argv[10], "median", 6) == 0) ? true : false;
track_median_solution = (strncmp(argv[10], "median_solution", 15) == 0) ? true : false;
track_median_stresses = (strncmp(argv[10], "median_stresses", 15) == 0) ? true : false;
}
// parse arguments
blocks = atoi(argv[2]);
threads = atoi(argv[3]);
s = atoi(argv[4]);
epsilon = strtof(argv[5], NULL);
k_max = atoi(argv[6]);
temp_min = strtof(argv[7], NULL);
alpha = strtof(argv[8], NULL);
iterations = atoi(argv[9]);
// read in matrix from file
readMatrix(argv[1], &matrix, &m, &n);
fprintf(stderr, "\nM: %i, N: %i\nBlocks: %i, Threads: %i", m, n, blocks, threads);
size_t size_D = m*m*sizeof(float); // total size in memory of dissimilarity & distance arrays
size_t size_Y = m*s*sizeof(float); // total size in memory of low-dimensional array;
float* Delta = (float*)malloc(size_D); // pointer to flattened MxM dissimilarity matrix; aka 'Δ' aka 'D'
float* Delta_prime = (float*)malloc(size_D);// pointer to temperature based dissimilarity matrix; aka '⧊' aka 'delta hat'
float* Y = (float*)malloc(size_Y); // MxS set of finding points in the low-dimensional space
float* D = (float*)malloc(size_D); // MxM matrix of euclidean distance in target-dimensional space; aka 'projD'
float* Y_med;
struct stress* normalized_stresses;
std::vector<struct stress> stresses[iterations];
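// NOTE: the array length 'iterations' is only known at run time, so this relies on the
// variable-length-array extension (GCC/Clang); standard C++ would use a std::vector of vectors.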
if (track_median) {
normalized_stresses = (struct stress*)malloc(iterations*sizeof(struct stress));
if (track_median_solution) {
Y_med = (float*)malloc(size_Y*iterations);
for (int i = 0; i < (m*s*iterations); i++) {
Y_med[i] = 0.0;
}
}
}
// Generate Dissimilarity matrix Delta from matrix
computeEuclideanDistances(matrix, Delta, m, n, m*n*sizeof(float), size_D, blocks, threads);
double total_stress = 0.0;
double max_stress = 0.0;
double min_stress = DBL_MAX;
unsigned long total_time = 0;
unsigned long max_time = 0;
unsigned long min_time = ULONG_MAX;
struct timespec* timer;
for(int iter = 0; iter < iterations; iter++) {
timer = startTimer();
// create initial random solution Y^[0]
matrixRandomPopulate(Y, m, s, blocks, threads);
// compute first distance matrix from random Y^[0]
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
float temp = computeTemperature(Delta, size_D, m, s);
int k; // current iteration
double error; // error value to determine if close enough approximation in lower dimensional space
double prev_stress;
double stress;
bool moreTemps = true;
// if tracking stresses, document initial stress
if (track_median_stresses) {
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
stresses[iter].push_back((struct stress){stress, 0, stresses[iter].size()});
stress = 0.0f;
}
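// Outer annealing loop: while the temperature is positive, a temperature-adjusted copy of the
// dissimilarities is built and SMACOF is run on it; the temperature is then reduced by alpha,
// with one final SMACOF pass at temperature 0 before exiting.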
while(moreTemps) {
k = 0; // current iteration
error = 1.0f; // error value to determine if close enough approximation in lower dimensional space
prev_stress = 0.0f;
stress = 0.0f;
if (temp > 0) {
computeNewDissimilarity(Delta, Delta_prime, size_D, temp, m, s, blocks, threads);
} else {
// compute first distance matrix from random Y^[0]
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
}
// SMACOF
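// Each iteration applies the Guttman transform (the SMACOF majorization update, which does not
// increase the stress being minimized) and then recomputes the low-dimensional distances.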
while(k < k_max && error > epsilon) {
if (temp > 0) {
// perform guttman transform
computeGuttmanTransform(handle, Y, D, Delta_prime, m, s, size_Y, size_D, blocks, threads);
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
//calculate STRESS
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
} else {
// perform guttman transform
computeGuttmanTransform(handle, Y, D, Delta, m, s, size_Y, size_D, blocks, threads);
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
//calculate STRESS
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
}
// update error and prev_stress values
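// Convergence test: the inner loop stops once the change in normalized stress drops below
// epsilon, or after k_max iterations.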
error = fabs(stress - prev_stress);
prev_stress = stress;
// if tracking stresses, push stress to vector
if (track_median_stresses) {
stresses[iter].push_back((struct stress){stress, 0, stresses[iter].size()});
}
stress = 0.0f;
k += 1;
}
// quit after running with temp of 0
if(temp == 0.0) {
moreTemps = false;
// run once more with temp of 0 once reached temp_min
} else if(temp < temp_min || (temp * alpha) < temp_min) {
temp = 0.0;
// reduce temp by alpha and run smacof again
} else {
temp *= alpha;
}
}
// end time
long int current_time = stopTimer(timer);
total_time += current_time;
if(current_time > max_time) {
max_time = current_time;
}
if(current_time < min_time) {
min_time = current_time;
}
// compute normalized stress for comparing mapping quality
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
// sum stress values for computing average stress
total_stress += stress;
// maintain maximum stress
if(stress > max_stress) {
max_stress = stress;
}
//maintain minimum stress
if(stress < min_stress) {
min_stress = stress;
}
// if tracking median results,
if (track_median) {
if(track_median_solution) {
for (int i = 0; i < (m*s); i++) {
Y_med[(m*s*iter)+i] = Y[i];
}
}
normalized_stresses[iter].value = stress;
normalized_stresses[iter].index = iter;
normalized_stresses[iter].time = current_time;
}
}
// print time results
printf("\nAVG_TIME: %0.8lf\n+MAX_TIME: %0.8lf\n-MIN_TIME: %0.8lf\n",
(double)(((long double)total_time/(long double)iterations)/(long double)BILLION), // average time
(double)((long double)max_time/(long double)BILLION), // max time
(double)((long double)min_time/(long double)BILLION) // min time
);
// print stress results
printf("\nAVG_STRESS: %0.8lf\n+MAX_STRESS: %0.8lf\n-MIN_STRESS: %0.8lf\n",
(total_stress/((double)iterations)), // average stress
max_stress, // max stress
min_stress // min stress
);
// if median is being tracked, print median results
if (track_median) {
struct stress* med = median(normalized_stresses, iterations);
printf("MEDIAN_STRESS: %0.8lf\nMEDIAN_TIME: %0.8lf\n", med->value, (double)(((long double)med->time)/((long double)BILLION)));
// if being tracked, print median solution
if (track_median_solution) {
printf("MEDIAN_SOLUTION: [\n");
for(int i = 0; i < m; i++) {
for(int j = 0; j < s; j++) {
printf("%f", Y_med[(med->index*m*s)+(i*s)+j]);
if (j != s-1) {
printf(" ");
}
}
printf("\n");
}
printf("]\n");
free(Y_med);
}
// if being tracked, print stresses from median iteration
if (track_median_stresses) {
printf("MEDIAN_STRESSES: [\n");
for (int i = 0; i < stresses[med->index].size(); i++) {
printf("%i, %0.8lf\n", stresses[med->index][i].index, stresses[med->index][i].value);
}
printf("]\n");
}
free(normalized_stresses);
}
free(matrix);
free(Delta);
free(Y);
free(D);
}
|
c0c12b32320ee5ea20c028031b39c90f0183ff35.cu
|
#define _POSIX_C_SOURCE 200809L
#define BILLION 1000000000L
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <time.h>
#include <stdbool.h>
#include <string.h>
#include <float.h>
#include <vector>
#include <limits.h>
#include "distance.h"
#include "guttman.h"
#include "matrix-read.h"
#include "temperature.h"
#include "stress.h"
#include "analysis.h"
#include "random.h"
// Host code
int main(int argc, char** argv) {
cublasHandle_t handle;
cublasCreate_v2(&handle);
int blocks; //number of blocks
int threads; //number of threads per block
int m; // number of items / objects; aka 'N'
int n; // dimensions of high-dimensional space;
int s; // dimension of low-dimensional space; aka 'L'
double epsilon; // threshold for the stress variance; aka 'ε'
int k_max; // maximum number of iterations; aka 'MAX'
float temp_min; // minimum temperature before final run of smacof
float alpha; // temperature reduction factor
int iterations; // number of test runs for gathering average performance
bool track_median; // flag for tracking statistics from median solution
bool track_median_solution; // flag for tracking median solution
bool track_median_stresses; // flag for tracking stresses from median solution
float* matrix;
// validate arguments
if(argc > 11) {
fprintf(stderr, "\nToo Many Arguments\n");
return 1;
} else if(argc < 10) {
fprintf(stderr, "\nToo Few Arguments\n");
return 1;
}
if (argc > 10) {
track_median = (strncmp(argv[10], "median", 6) == 0) ? true : false;
track_median_solution = (strncmp(argv[10], "median_solution", 15) == 0) ? true : false;
track_median_stresses = (strncmp(argv[10], "median_stresses", 15) == 0) ? true : false;
}
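// expected arguments (as parsed below):
//   argv[1] input matrix file, argv[2] blocks, argv[3] threads per block,
//   argv[4] target dimension s, argv[5] epsilon, argv[6] k_max,
//   argv[7] temp_min, argv[8] alpha, argv[9] iterations,
//   argv[10] optional "median", "median_solution" or "median_stresses" flag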
// parse arguments
blocks = atoi(argv[2]);
threads = atoi(argv[3]);
s = atoi(argv[4]);
epsilon = strtof(argv[5], NULL);
k_max = atoi(argv[6]);
temp_min = strtof(argv[7], NULL);
alpha = strtof(argv[8], NULL);
iterations = atoi(argv[9]);
// read in matrix from file
readMatrix(argv[1], &matrix, &m, &n);
fprintf(stderr, "\nM: %i, N: %i\nBlocks: %i, Threads: %i", m, n, blocks, threads);
size_t size_D = m*m*sizeof(float); // total size in memory of dissimilarity & distance arrays
size_t size_Y = m*s*sizeof(float); // total size in memory of low-dimensional array;
float* Delta = (float*)malloc(size_D); // pointer to flattened MxM dissimilarity matrix; aka 'Δ' aka 'D'
float* Delta_prime = (float*)malloc(size_D);// pointer to temperature based dissimilarity matrix; aka '⧊' aka 'delta hat'
float* Y = (float*)malloc(size_Y); // MxS set of points being fitted in the low-dimensional space
float* D = (float*)malloc(size_D); // MxM matrix of euclidean distance in target-dimensional space; aka 'projD'
float* Y_med;
struct stress* normalized_stresses;
std::vector<struct stress> stresses[iterations];
if (track_median) {
normalized_stresses = (struct stress*)malloc(iterations*sizeof(struct stress));
if (track_median_solution) {
Y_med = (float*)malloc(size_Y*iterations);
for (int i = 0; i < (m*s*iterations); i++) {
Y_med[i] = 0.0;
}
}
}
// Generate Dissimilarity matrix Delta from matrix
computeEuclideanDistances(matrix, Delta, m, n, m*n*sizeof(float), size_D, blocks, threads);
double total_stress = 0.0;
double max_stress = 0.0;
double min_stress = DBL_MAX;
unsigned long total_time = 0;
unsigned long max_time = 0;
unsigned long min_time = ULONG_MAX;
struct timespec* timer;
for(int iter = 0; iter < iterations; iter++) {
timer = startTimer();
// create initial random solution Y^[0]
matrixRandomPopulate(Y, m, s, blocks, threads);
// compute first distance matrix from random Y^[0]
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
float temp = computeTemperature(Delta, size_D, m, s);
int k; // current iteration
double error; // stress change used to decide whether the low-dimensional approximation is close enough
double prev_stress;
double stress;
bool moreTemps = true;
// if tracking stresses, document initial stress
if (track_median_stresses) {
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
stresses[iter].push_back((struct stress){stress, 0, stresses[iter].size()});
stress = 0.0f;
}
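// Temperature-annealed SMACOF: while temp > 0 the Guttman transform uses the
// temperature-smoothed dissimilarities Delta_prime; temp is multiplied by alpha
// each pass until it falls below temp_min, then one final pass runs with temp = 0
// (i.e. with the original Delta).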
while(moreTemps) {
k = 0; // current iteration
error = 1.0f; // stress change used to decide whether the low-dimensional approximation is close enough
prev_stress = 0.0f;
stress = 0.0f;
if (temp > 0) {
computeNewDissimilarity(Delta, Delta_prime, size_D, temp, m, s, blocks, threads);
} else {
// compute first distance matrix from random Y^[0]
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
}
// SMACOF
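// (stress majorization: repeat the Guttman transform and distance update until the
// change in normalized stress drops below epsilon or k_max iterations are reached)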
while(k < k_max && error > epsilon) {
if (temp > 0) {
// perform guttman transform
computeGuttmanTransform(handle, Y, D, Delta_prime, m, s, size_Y, size_D, blocks, threads);
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
//calculate STRESS
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
} else {
// perform guttman transform
computeGuttmanTransform(handle, Y, D, Delta, m, s, size_Y, size_D, blocks, threads);
computeEuclideanDistances(Y, D, m, s, size_Y, size_D, blocks, threads);
//calculate STRESS
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
}
// update error and prev_stress values
error = fabs(stress - prev_stress);
prev_stress = stress;
// if tracking stresses, push stress to vector
if (track_median_stresses) {
stresses[iter].push_back((struct stress){stress, 0, stresses[iter].size()});
}
stress = 0.0f;
k += 1;
}
// quit after running with temp of 0
if(temp == 0.0) {
moreTemps = false;
// run once more with temp of 0 once temp_min is reached
} else if(temp < temp_min || (temp * alpha) < temp_min) {
temp = 0.0;
// reduce temp by alpha and run smacof again
} else {
temp *= alpha;
}
}
// end time
long int current_time = stopTimer(timer);
total_time += current_time;
if(current_time > max_time) {
max_time = current_time;
}
if(current_time < min_time) {
min_time = current_time;
}
// compute normalized stress for comparing mapping quality
stress = computeNormalizedStress(Delta, D, size_D, m, blocks, threads);
// sum stress values for computing average stress
total_stress += stress;
// maintain maximum stress
if(stress > max_stress) {
max_stress = stress;
}
//maintain minimum stress
if(stress < min_stress) {
min_stress = stress;
}
// if tracking median results, record this iteration's statistics
if (track_median) {
if(track_median_solution) {
for (int i = 0; i < (m*s); i++) {
Y_med[(m*s*iter)+i] = Y[i];
}
}
normalized_stresses[iter].value = stress;
normalized_stresses[iter].index = iter;
normalized_stresses[iter].time = current_time;
}
}
// print time results
printf("\nAVG_TIME: %0.8lf\n+MAX_TIME: %0.8lf\n-MIN_TIME: %0.8lf\n",
(double)(((long double)total_time/(long double)iterations)/(long double)BILLION), // average time
(double)((long double)max_time/(long double)BILLION), // max time
(double)((long double)min_time/(long double)BILLION) // min time
);
// print stress results
printf("\nAVG_STRESS: %0.8lf\n+MAX_STRESS: %0.8lf\n-MIN_STRESS: %0.8lf\n",
(total_stress/((double)iterations)), // average stress
max_stress, // max stress
min_stress // min stress
);
// if median is being tracked, print median results
if (track_median) {
struct stress* med = median(normalized_stresses, iterations);
printf("MEDIAN_STRESS: %0.8lf\nMEDIAN_TIME: %0.8lf\n", med->value, (double)(((long double)med->time)/((long double)BILLION)));
// if being tracked, print median solution
if (track_median_solution) {
printf("MEDIAN_SOLUTION: [\n");
for(int i = 0; i < m; i++) {
for(int j = 0; j < s; j++) {
printf("%f", Y_med[(med->index*m*s)+(i*s)+j]);
if (j != s-1) {
printf(" ");
}
}
printf("\n");
}
printf("]\n");
free(Y_med);
}
// if being tracked, print stresses from median iteration
if (track_median_stresses) {
printf("MEDIAN_STRESSES: [\n");
for (int i = 0; i < stresses[med->index].size(); i++) {
printf("%i, %0.8lf\n", stresses[med->index][i].index, stresses[med->index][i].value);
}
printf("]\n");
}
free(normalized_stresses);
}
free(matrix);
free(Delta);
free(Y);
free(D);
}
|
50546e5894cebbd497d4237689e74d89fd564b36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct array_command_1 {
// Ikra::Symbolic::ArrayIndexCommand
int *result;
__host__ __device__ array_command_1(int *result = NULL) : result(result) { }
};
struct array_command_2 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_1 *input_0;
__host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_3 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_2 *input_0;
__host__ __device__ array_command_3(int *result = NULL, array_command_2 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_5 {
// Ikra::Symbolic::FixedSizeArrayInHostSectionCommand
int *result;
variable_size_array_t input_0;
__host__ __device__ array_command_5(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { }
int size() { return input_0.size; }
};
struct array_command_6 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_5 *input_0;
__host__ __device__ array_command_6(int *result = NULL, array_command_5 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
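// Note: the generated kernels below all follow the same pattern: one thread per output
// element, each applying the fused block function(s) either to its own thread index or
// to the corresponding element of an input device array.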
__global__ void kernel_5(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_7(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_9(environment_t *_env_, int _num_threads_, int *_result_, int *_array_11_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_11_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_12(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_14(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_16(environment_t *_env_, int _num_threads_, int *_result_, int *_array_18_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_18_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_19(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_21(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_23(environment_t *_env_, int _num_threads_, int *_result_, int *_array_25_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_25_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_26(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_28(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_30(environment_t *_env_, int _num_threads_, int *_result_, int *_array_32_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_32_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_33(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_35(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_37(environment_t *_env_, int _num_threads_, int *_result_, int *_array_39_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_39_[_tid_]);
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
hipError_t error = hipGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, hipGetErrorString(error));\
hipDeviceReset();\
return variable_size_array_t::error_return_value;\
}
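// The host section below evaluates the array-command chain built in its loop: each switch
// case materializes one command type by allocating a 511-element device result buffer
// (unless cmd->result is already set) and launching the matching kernel with 2 blocks of
// 256 threads.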
variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result)
{
array_command_2 * input = new array_command_2();
int i;
union_t _ssa_var_a_2;
union_t _ssa_var_a_1;
{
_ssa_var_a_1 = union_t(10, union_v_t::from_pointer((void *) input));
for (i = 1; i <= (100000 - 1); i++)
{
_ssa_var_a_2 = union_t(12, union_v_t::from_pointer((void *) new array_command_6(NULL, new array_command_5(NULL, ({
variable_size_array_t _polytemp_result_1;
{
union_t _polytemp_expr_2 = _ssa_var_a_1;
switch (_polytemp_expr_2.class_id)
{
case 10: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_1 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]
array_command_2 * cmd = (array_command_2 *) _polytemp_expr_2.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_6;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_6, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_6);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_5), dim3(2), dim3(256), 0, 0, dev_env, 511, _kernel_result_6);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_6;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_1 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_2.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_8;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_8, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_8);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_7), dim3(2), dim3(256), 0, 0, dev_env, 511, _kernel_result_8);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_8;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 12: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_1 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_6 * cmd = (array_command_6 *) _polytemp_expr_2.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_10;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_10, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_10);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_9), dim3(2), dim3(256), 0, 0, dev_env, 511, _kernel_result_10, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_10;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
}
}
_polytemp_result_1;
})))));
_ssa_var_a_1 = _ssa_var_a_2;
}
i--;
return ({
variable_size_array_t _polytemp_result_9;
{
union_t _polytemp_expr_10 = _ssa_var_a_1;
switch (_polytemp_expr_10.class_id)
{
case 10: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_9 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]
array_command_2 * cmd = (array_command_2 *) _polytemp_expr_10.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_34;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_34, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_34);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_33), dim3(2), dim3(256), 0, 0, dev_env, 511, _kernel_result_34);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_34;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_9 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_10.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_36;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_36, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_36);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_35), dim3(2), dim3(256), 0, 0, dev_env, 511, _kernel_result_36);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_36;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 12: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_9 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_6 * cmd = (array_command_6 *) _polytemp_expr_10.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_38;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_38, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_38);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_37), dim3(2), dim3(256), 0, 0, dev_env, 511, _kernel_result_38, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_38;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
}
}
_polytemp_result_9;
});
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
hipError_t error = hipGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, hipGetErrorString(error));\
hipDeviceReset();\
return result_var;\
}
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, hipFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, hipMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(dev_env, host_env, sizeof(environment_t), hipMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Copy back memory and set pointer of result */
program_result->result = ({
variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, hipMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
for (
auto device_ptr = program_result->device_allocations->begin();
device_ptr < program_result->device_allocations->end();
device_ptr++)
{
checkErrorReturn(program_result, hipFree(*device_ptr));
}
delete program_result->device_allocations;
timeReportMeasure(program_result, free_memory);
return program_result;
}
|
50546e5894cebbd497d4237689e74d89fd564b36.cu
|
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct array_command_1 {
// Ikra::Symbolic::ArrayIndexCommand
int *result;
__host__ __device__ array_command_1(int *result = NULL) : result(result) { }
};
struct array_command_2 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_1 *input_0;
__host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_3 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_2 *input_0;
__host__ __device__ array_command_3(int *result = NULL, array_command_2 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_5 {
// Ikra::Symbolic::FixedSizeArrayInHostSectionCommand
int *result;
variable_size_array_t input_0;
__host__ __device__ array_command_5(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { }
int size() { return input_0.size; }
};
struct array_command_6 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_5 *input_0;
__host__ __device__ array_command_6(int *result = NULL, array_command_5 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
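// Note: the generated kernels below all follow the same pattern: one thread per output
// element, each applying the fused block function(s) either to its own thread index or
// to the corresponding element of an input device array.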
__global__ void kernel_5(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_7(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_9(environment_t *_env_, int _num_threads_, int *_result_, int *_array_11_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_11_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_12(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_14(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_16(environment_t *_env_, int _num_threads_, int *_result_, int *_array_18_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_18_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_19(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_21(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_23(environment_t *_env_, int _num_threads_, int *_result_, int *_array_25_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_25_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_26(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_28(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_30(environment_t *_env_, int _num_threads_, int *_result_, int *_array_32_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_32_[_tid_]);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
__global__ void kernel_33(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, _tid_);
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int j)
{
{
return (j + 1);
}
}
#endif
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_35(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_3_(_env_, _block_k_2_(_env_, _tid_));
}
}
// TODO: There should be a better way to check if _block_k_6_ is already defined
#ifndef _block_k_6__func
#define _block_k_6__func
__device__ int _block_k_6_(environment_t *_env_, int k)
{
{
return (k + 1);
}
}
#endif
__global__ void kernel_37(environment_t *_env_, int _num_threads_, int *_result_, int *_array_39_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_6_(_env_, _array_39_[_tid_]);
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
cudaError_t error = cudaGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\
cudaDeviceReset();\
return variable_size_array_t::error_return_value;\
}
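// The host section below evaluates the array-command chain built in its loop: each switch
// case materializes one command type by allocating a 511-element device result buffer
// (unless cmd->result is already set) and launching the matching kernel with 2 blocks of
// 256 threads.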
variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result)
{
array_command_2 * input = new array_command_2();
int i;
union_t _ssa_var_a_2;
union_t _ssa_var_a_1;
{
_ssa_var_a_1 = union_t(10, union_v_t::from_pointer((void *) input));
for (i = 1; i <= (100000 - 1); i++)
{
_ssa_var_a_2 = union_t(12, union_v_t::from_pointer((void *) new array_command_6(NULL, new array_command_5(NULL, ({
variable_size_array_t _polytemp_result_1;
{
union_t _polytemp_expr_2 = _ssa_var_a_1;
switch (_polytemp_expr_2.class_id)
{
case 10: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_1 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]
array_command_2 * cmd = (array_command_2 *) _polytemp_expr_2.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_6;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_6, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_6);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_5<<<2, 256>>>(dev_env, 511, _kernel_result_6);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_6;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_1 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_2.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_8;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_8, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_8);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_7<<<2, 256>>>(dev_env, 511, _kernel_result_8);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_8;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 12: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_1 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_6 * cmd = (array_command_6 *) _polytemp_expr_2.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_10;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_10, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_10);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_9<<<2, 256>>>(dev_env, 511, _kernel_result_10, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_10;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
}
}
_polytemp_result_1;
})))));
_ssa_var_a_1 = _ssa_var_a_2;
}
i--;
return ({
variable_size_array_t _polytemp_result_9;
{
union_t _polytemp_expr_10 = _ssa_var_a_1;
switch (_polytemp_expr_10.class_id)
{
case 10: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_9 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]
array_command_2 * cmd = (array_command_2 *) _polytemp_expr_10.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_34;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_34, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_34);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_33<<<2, 256>>>(dev_env, 511, _kernel_result_34);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_34;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_9 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_10.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_36;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_36, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_36);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_35<<<2, 256>>>(dev_env, 511, _kernel_result_36);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_36;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
case 12: /* [Ikra::Symbolic::ArrayCombineCommand, size = 511] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_9 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 511]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_a_1].__call__()].to_command()].pmap()]
array_command_6 * cmd = (array_command_6 *) _polytemp_expr_10.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_38;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_38, (sizeof(int) * 511)));
program_result->device_allocations->push_back(_kernel_result_38);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_37<<<2, 256>>>(dev_env, 511, _kernel_result_38, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_38;
}
variable_size_array_t((void *) cmd->result, 511);
}); break;
}
}
_polytemp_result_9;
});
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
cudaError_t error = cudaGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\
cudaDeviceReset();\
return result_var;\
}
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, cudaFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, cudaMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(dev_env, host_env, sizeof(environment_t), cudaMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Copy back memory and set pointer of result */
program_result->result = ({
variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, cudaMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
for (
auto device_ptr = program_result->device_allocations->begin();
device_ptr < program_result->device_allocations->end();
device_ptr++)
{
checkErrorReturn(program_result, cudaFree(*device_ptr));
}
delete program_result->device_allocations;
timeReportMeasure(program_result, free_memory);
return program_result;
}
|
1a7f93455af1ed629006dd45d5e43b4b6dcdbb91.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <array/NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/ShapeUtils.h>
#include <ops/declarable/helpers/top_k.h>
//#include <ops/declarable/generic/helpers/BroadcastHelper.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
namespace sd {
namespace ops {
namespace helpers {
// ------------------------------------------------------------------------------------------------------------------ //
// invert the second diagonal of a lower triangular matrix
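// For a lower triangular matrix L the sub-diagonal entry of its inverse is
// (L^-1)[i][i-1] = -L[i][i-1] / (L[i][i] * L[i-1][i-1]); each thread handles one row i.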
template <typename T>
static SD_KERNEL void invertKernelLow(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start + 1; i < n; i += step) {
sd::LongType pos[] = {i, i - 1};
sd::LongType posX[] = {i, i};
sd::LongType posY[] = {i - 1, i - 1};
auto xIndex = shape::getOffset(inputShape, pos);
auto dxIndex = shape::getOffset(inputShape, posX);
auto dyIndex = shape::getOffset(inputShape, posY);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert lower triangular matrix
inverted[zIndex] = -input[xIndex] / (input[dxIndex] * input[dyIndex]);
// math::atomics::sd_atomicAdd(&inverted[zIndex], - input[xIndex] * inverted[iIndex] / input[dIndex]);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// invert diagonal vals to upper diagonal matrix
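// Each thread i divides the current diagonal entry of the inverse buffer by the input's
// diagonal entry; with the inverse buffer initialized to the identity this yields
// (A^-1)[i][i] = 1 / A[i][i].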
template <typename T>
static SD_KERNEL void upvertKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < n; i += step) {
sd::LongType pos[] = {i, i};
auto xIndex = shape::getOffset(inputShape, pos);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert diagonal elements
inverted[zIndex] /= input[xIndex];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// invert upper second diagonal
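// Each thread i atomically accumulates -U[i][i+1] * (U^-1)[i+1][i+1] into (U^-1)[i][i+1];
// the division by the main-diagonal entry is left commented out in the code below.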
template <typename T>
static SD_KERNEL void upvertKernelUp(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
__shared__ T *inverted;
__shared__ const T *input;
if (threadIdx.x == 0) {
inverted = reinterpret_cast<T *>(invertedBuf);
input = reinterpret_cast<const T *>(inputBuf);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < n - 1; i += step) {
sd::LongType pos[] = {i, i + 1};
sd::LongType posX[] = {i + 1, i + 1};
auto xIndex = shape::getOffset(inputShape, pos);
auto iIndex = shape::getOffset(invertedShape, posX);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert upper matrix
math::atomics::sd_atomicAdd(&inverted[zIndex], -input[xIndex] * inverted[iIndex]); // / input[yIndex]);
// inputMatrix->t<T>(i, i + 1) * invertedMatrix->t<T>(i + 1, i + 1) / inputMatrix->t<T>(i, i)
}
}
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
static SD_KERNEL void invertLowKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto input = reinterpret_cast<const T *>(inputBuf);
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (int i = tid + 2; i < n; i += step) {
for (int j = i - 2; j >= 0; --j)
for (int k = 0; k < i; k++) {
sd::LongType posZ[] = {i, j};
sd::LongType posY[] = {k, j};
sd::LongType posX[] = {i, k};
sd::LongType posD[] = {i, i};
auto xIndex = shape::getOffset(inputShape, posX);
auto yIndex = shape::getOffset(invertedShape, posY);
auto dIndex = shape::getOffset(inputShape, posD);
auto zIndex = shape::getOffset(invertedShape, posZ);
// invert non-diagonal elements
math::atomics::sd_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex] / input[dIndex]);
}
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Inversion of upper-triangular matrix non-diagonal elements when the main and second diagonals are already processed
template <typename T>
static SD_KERNEL void invertUpKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
  auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = (int)n - tid - 2; i >= 0; i -= step) {
for (int j = i + 2; j < (int)n; j++)
for (int k = i; k < (int)n; k++) {
sd::LongType posZ[] = {i, j};
sd::LongType posY[] = {k, j};
sd::LongType posX[] = {i, k};
      // inversion with Jordan-Gauss transformation
auto xIndex = shape::getOffset(inputShape, posX);
auto yIndex = shape::getOffset(invertedShape, posY);
auto zIndex = shape::getOffset(invertedShape, posZ);
// invert upper non-diagonal elements
math::atomics::sd_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]);
}
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// procedure to invert a lower-triangular matrix.
// In the current case the lower-triangular matrix has a main diagonal with general values
//
template <typename T>
static void invertLowerMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
int n = inputMatrix->rows();
invertedMatrix->setIdentity();
if (inputMatrix->isIdentityMatrix()) return;
auto stream = context->getCudaStream();
// invert lower matrix
// invert main diagonal
hipLaunchKernelGGL(( upvertKernel<T>), dim3(1), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert the second diagonal
hipLaunchKernelGGL(( invertKernelLow<T>), dim3(1), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert non-diagonal elements
hipLaunchKernelGGL(( invertLowKernel<T>), dim3(n), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
}
// ------------------------------------------------------------------------------------------------------------------ //
// caller for invert lower matrix routine
void invertLowerMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix});
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (context, inputMatrix, invertedMatrix),
SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix});
}
// ------------------------------------------------------------------------------------------------------------------ //
// procedure to invert an upper-triangular matrix.
// In the current case the upper-triangular matrix has all ones on its main diagonal.
template <typename T>
static void invertUpperMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
int n = inputMatrix->rows();
invertedMatrix->setIdentity();
auto stream = context->getCudaStream();
if (inputMatrix->isIdentityMatrix()) { // the inverse for I is I
return;
}
// invert upper matrix
// invert the second diagonal
hipLaunchKernelGGL(( upvertKernelUp<T>), dim3(1), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert other elements
hipLaunchKernelGGL(( invertUpKernel<T>), dim3(n), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
}
// ------------------------------------------------------------------------------------------------------------------ //
// inversion of upper triangular matrix - runner routine
void invertUpperMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix});
BUILD_SINGLE_SELECTOR(invertedMatrix->dataType(), invertUpperMatrix_, (context, inputMatrix, invertedMatrix),
SD_FLOAT_NATIVE);
  NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix});
}
// ------------------------------------------------------------------------------------------------------------------ //
// determinant kernel - accumulation product of all values on the main diagonal
template <typename T>
static SD_KERNEL void determinantKernel(T *compound, T *result, sd::LongType len) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
auto pos = i * len + i; // shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
// multiply all diagonal elements
math::atomics::sd_atomicMul(&result[0], compound[pos]);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// determinant logarithm - accumulated sum of the logarithms of all values on the main diagonal. All values
// passed to the logarithm should be positive
template <typename T>
static SD_KERNEL void determinantLogKernel(T *compound, T *result, sd::LongType len) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
auto pos = i * len + i; // shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
// sum logs of all diagonal elements
math::atomics::sd_atomicAdd(result, math::sd_log<T, T>(math::sd_abs(compound[pos])));
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// kernel to copy matrix with given shape to compound tensor with given pos
// output - a N-D tensor buffer with rank not less than 2, input - 2D square n x n matrix with n = rowLen
template <typename T, typename F>
static SD_KERNEL void fillMatrix(void *output, const sd::LongType *outShape, const void *input,
const sd::LongType *inputShape, sd::LongType pos, sd::LongType rowLen) {
__shared__ F *matrix;
__shared__ const T *inputBuf;
__shared__ sd::LongType inputLen;
__shared__ sd::LongType n2;
if (threadIdx.x == 0) {
matrix = reinterpret_cast<F *>(output);
inputBuf = reinterpret_cast<const T *>(input);
inputLen = shape::length(inputShape);
n2 = rowLen * rowLen;
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int k = pos + start, j = start; j < n2; k += step, j += step) {
auto xIndex = shape::getIndexOffset(k, inputShape);
matrix[j] = (F)inputBuf[xIndex];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// same as above, but without type conversion
template <typename T>
static SD_KERNEL void returnMatrix(void *output, const sd::LongType *outputShape, const void *input,
const sd::LongType *inputShape, sd::LongType pos, sd::LongType rowLen) {
__shared__ sd::LongType outputLen;
__shared__ sd::LongType n2;
auto matrix = reinterpret_cast<const T *>(input);
auto outputBuf = reinterpret_cast<T *>(output);
if (threadIdx.x == 0) {
outputLen = shape::length(inputShape);
n2 = rowLen * rowLen;
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int k = pos + start, j = start; j < n2; k += step, j += step) {
auto zIndex = shape::getIndexOffset(k, outputShape);
outputBuf[zIndex] = matrix[j];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// fill up permutation matrix kernel. The permutation matrix is filled with zeros and ones
template <typename F>
static SD_KERNEL void fillUpPermutation(void *output, const sd::LongType *shape, int *source, int rowNum) {
F *permutation = reinterpret_cast<F *>(output);
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < rowNum; i += step) {
int val = source[i] - 1;
sd::LongType posF[] = {i, val};
auto pos = shape::getOffset(shape, posF);
permutation[pos] = F(1.f);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// LUP decomposition runner - using CUBLAS SOLVER
// if a permutation is given, LUP decomposition is used, otherwise plain LU decomposition
// L - lower triangular, U - upper triangular, P - permutation matrices
// PA = LU
//
// input - A matrix nxn
// compound - C matrix L + U - I, or main diagonal and lower - L matrix, from the 2nd diagonal - U matrix
template <typename T, typename I>
static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) {
auto stream = context->getCudaStream();
auto n = input->rows();
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
hipsolverDnHandle_t *cusolverH = (hipsolverDnHandle_t *)context->getCusolverHandle(); // nullptr;
// create solver handle
cusolverStatus_t status; // hipsolverDnCreate(&cusolverH);
// if (CUSOLVER_STATUS_SUCCESS != status) {
// throw cuda_exception::build("Cannot create cuSolver handle", status);
// }
// set solver stream
status = hipsolverDnSetStream(*cusolverH, *stream);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("Cannot set up stream for cuda solver", status);
}
int lwork = 0;
int *d_info = nullptr;
// allocate memory for permutation vector
auto err = hipMalloc((void **)&d_info, sizeof(int));
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err);
}
DataType dtype = input->dataType();
switch (dtype) { // there are two implementations with cublas for LUP decomposition - double and float
case DataType::DOUBLE: {
double *d_work = nullptr;
// compute internal buffer size
double *matrix = reinterpret_cast<double *>(input->specialBuffer());
status = hipsolverDnDgetrf_bufferSize(*cusolverH, n, n, matrix, n, &lwork);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status);
}
      err = hipMalloc((void **)&d_work, sizeof(double) * lwork);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err);
}
if (permutation == nullptr) {
status = hipsolverDnDgetrf(*cusolverH, n, n, matrix, n, d_work, nullptr, d_info);
if (status != CUSOLVER_STATUS_SUCCESS) {
throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status);
}
} else {
NDArray permutVector('c', {n}, sd::DataType::INT32, context);
int *permutationBuf = permutVector.dataBuffer()->specialAsT<int>();
status = hipsolverDnDgetrf(*cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info);
if (status != CUSOLVER_STATUS_SUCCESS) {
throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status);
}
if (permutation->rankOf() == 2) {
hipLaunchKernelGGL(( fillUpPermutation<double>), dim3(n), dim3(n), 1024, *stream, permutation->specialBuffer(),
permutation->specialShapeInfo(), permutationBuf, n);
} else {
permutVector.tickWriteDevice();
input->tickWriteDevice();
compound->assign(input);
permutation->assign(permutVector);
}
}
err = hipFree(d_work);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err);
}
} break;
case DataType::FLOAT32: {
float *matrix = reinterpret_cast<float *>(input->specialBuffer());
float *d_work = nullptr;
status = hipsolverDnSgetrf_bufferSize(*cusolverH, n, n, matrix, n, &lwork);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status);
}
err = hipMalloc((void **)&d_work, sizeof(float) * lwork);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err);
}
if (permutation == nullptr)
status = hipsolverDnSgetrf(*cusolverH, n, n, matrix, n, d_work, nullptr, d_info);
else {
NDArray permutVector('c', {n}, DataType::INT32, context);
int *permutationBuf = reinterpret_cast<int *>(permutVector.specialBuffer());
status = hipsolverDnSgetrf(*cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info);
if (permutation->rankOf() == 2) {
hipLaunchKernelGGL(( fillUpPermutation<I>), dim3(n), dim3(n), 128, *stream, permutation->specialBuffer(), permutation->specialShapeInfo(),
permutationBuf, n);
permutation->tickWriteDevice();
} else {
input->tickWriteDevice();
compound->assign(input);
permutation->assign(permutVector);
}
}
err = hipFree(d_work);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err);
}
}
}
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status);
}
err = hipFree(d_info);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err);
}
// hipsolverDnDestroy(cusolverH);
// NDArray::registerSpecialUse({input}, {input});
input->tickWriteDevice();
}
// ------------------------------------------------------------------------------------------------------------------ //
BUILD_DOUBLE_TEMPLATE(template void lup_,
(LaunchContext * context, NDArray *input, NDArray *output, NDArray *permutation), SD_FLOAT_NATIVE,
SD_INDEXING_TYPES);
template <typename T>
static SD_DEVICE void swapRows(T *matrix, const sd::LongType *shape, sd::LongType theFirst, sd::LongType theSecond,
sd::LongType n) {
if (theFirst != theSecond) {
for (auto i = 0; i < n; i++) {
sd::LongType theFirstPos[] = {theFirst, i};
sd::LongType theSecondPos[] = {theSecond, i};
auto theFirstIndex = shape::getOffset(shape, theFirstPos, 0);
auto theSecondIndex = shape::getOffset(shape, theSecondPos, 0);
math::sd_swap(matrix[theFirstIndex], matrix[theSecondIndex]);
}
}
}
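// eliminates one column of the compound buffer below the pivot: divides the sub-column by the pivot value and
// updates the trailing submatrix (a single Gaussian-elimination step of the in-place LU factorization)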
template <typename T>
static SD_DEVICE void processColumns(sd::LongType currentRow, sd::LongType rowNum, T *compoundBuf,
const sd::LongType *compoundShape) {
sd::LongType xDiag[] = {currentRow, currentRow};
auto diagIndex = shape::getOffset(compoundShape, xDiag, 0);
for (auto j = currentRow + 1; j < rowNum; j++) {
sd::LongType xRow[] = {j, currentRow};
auto rowIndex = shape::getOffset(compoundShape, xRow, 0);
compoundBuf[rowIndex] /= compoundBuf[diagIndex]; // output->t<T>(i, i);
for (auto k = currentRow + 1; k < rowNum; k++) {
sd::LongType yRow[] = {j, k};
sd::LongType yCol[] = {currentRow, k};
auto rowIndexY = shape::getOffset(compoundShape, yRow, 0);
auto colIndex = shape::getOffset(compoundShape, yCol, 0);
compoundBuf[rowIndexY] -= compoundBuf[rowIndex] * compoundBuf[colIndex];
}
}
}
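// partial-pivoting helper: returns the row index, at or below the given column, holding the maximum absolute
// value in that column, or -1 when the whole sub-column is zero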
template <typename T>
SD_DEVICE sd::LongType argmaxCol(sd::LongType column, T *compoundBuffer, const sd::LongType *compoundShape) {
auto rowNum = shape::sizeAt(compoundShape, 0);
sd::LongType xInitial[] = {column, column};
auto xInitialIndex = shape::getOffset(compoundShape, xInitial, 0);
auto maxValue = T(0); // sd::math::sd_abs(compoundBuffer[xInitialIndex]);
auto result = -1LL;
for (auto rowCounter = column; rowCounter < rowNum; rowCounter++) {
sd::LongType xPos[] = {rowCounter, column};
auto xIndex = shape::getOffset(compoundShape, xPos, 0);
if (sd::math::sd_abs(compoundBuffer[xIndex]) > maxValue) {
maxValue = sd::math::sd_max(maxValue, sd::math::sd_abs(compoundBuffer[xIndex]));
result = rowCounter;
}
}
return result;
}
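// in-place LU factorization with partial pivoting for a single n x n matrix; row swaps are mirrored in the
// permutation vector; returns -1 when a zero pivot column is found (singular matrix), 0 otherwise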
template <typename T, typename I>
static SD_DEVICE int luNN(T *matrix, const sd::LongType *shape, I *permutation, const sd::LongType *permuShape,
sd::LongType n) {
for (auto i = 0; i < n - 1; i++) {
auto pivotIndex = argmaxCol(i, matrix, shape);
if (pivotIndex < 0) {
return -1; // throw std::runtime_error("helpers::luNN_: input matrix is singular.");
}
math::sd_swap(permutation[shape::getIndexOffset(i, permuShape)],
permutation[shape::getIndexOffset(pivotIndex, permuShape)]);
swapRows(matrix, shape, (sd::LongType)i, pivotIndex, n);
processColumns(i, n, matrix, shape);
}
return 0;
}
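// batched LU kernel: each thread of the grid-stride loop factorizes one matrix TAD together with its
// permutation TAD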
template <typename T, typename I>
static SD_KERNEL void luBatchedKernel(T *outputBuf, const sd::LongType *outputShape, I *permutations,
const sd::LongType *permuShape, const sd::LongType *outputTadShape,
const sd::LongType *outputTadOffsets, const sd::LongType *permuTadShape,
const sd::LongType *permuTadOffsets, sd::LongType batchNum) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto b = start; b < batchNum; b += step) {
T *matrix = outputBuf + outputTadOffsets[b];
I *permutation = permutations + permuTadOffsets[b];
if (0 != luNN(matrix, outputTadShape, permutation, permuTadShape, shape::length(permuTadShape))) break;
}
}
template <typename T, typename I>
static void lu_(LaunchContext *context, NDArray *input, NDArray *output, NDArray *permutationVectors) {
auto n = input->sizeAt(-1);
auto stream = context->getCudaStream();
NDArray iota('c', {n}, permutationVectors->dataType(), context); // = NDArrayFactory::create(); // <int>('c', {n});
iota.linspace(0);
iota.syncToDevice();
  output->assign(input);  // copy the input into the output; the factorization below is done in place on the output
// output->tickWriteDevice();
permutationVectors->applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), iota, *permutationVectors, true, nullptr);
// permutationVectors->tickWriteDevice();
auto tads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
  auto permutationTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-1});
  auto batchNum = tads.numberOfTads();
  hipLaunchKernelGGL(( luBatchedKernel<T, I>), dim3(batchNum), dim3(256), 1024, *stream, 
      reinterpret_cast<T *>(output->platformBuffer()), output->specialShapeInfo(),
      reinterpret_cast<I *>(permutationVectors->platformBuffer()), permutationVectors->specialShapeInfo(),
      tads.specialShapeInfo(), tads.specialOffsets(), permutationTads.specialShapeInfo(),
      permutationTads.specialOffsets(), batchNum);
}
void lu(LaunchContext *context, NDArray *input, NDArray *output, NDArray *permutations) {
NDArray::prepareSpecialUse({output, permutations}, {input});
BUILD_DOUBLE_SELECTOR(input->dataType(), permutations->dataType(), lu_, (context, input, output, permutations),
SD_FLOAT_NATIVE, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output, permutations}, {input});
}
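// Usage sketch (illustrative only, array names are assumptions): for a batch of square matrices A with shape
// {b, n, n}, allocate an output NDArray of the same shape and an integer permutation NDArray of shape {b, n},
// then call helpers::lu(context, &A, &out, &perm); out receives the compound LU factors and perm the row
// permutation of every matrix in the batch.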
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
static sd::Status determinant_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
sd::LongType n = input->sizeAt(-1);
sd::LongType n2 = n * n;
  std::vector<int> dims;
auto packX =
ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
// auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
// DataType dtype = input->dataType();
// if (dtype != DataType::DOUBLE)
// dtype = DataType::FLOAT32;
auto matrix =
NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT<T>(), context); //, block.getWorkspace());
auto det = NDArrayFactory::create<T>(1, context);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input});
dim3 launchDims(256, 256, 1024);
output->assign(1.f);
for (int e = 0; e < output->lengthOf(); e++) {
sd::LongType pos = e * n2;
// if (matrix.dataType() == input->dataType())
hipLaunchKernelGGL(( fillMatrix<T, T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n);
// else
// fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z,
// *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(),
// input->special(), pos, n);
lup_<T, int>(context, &matrix, nullptr, nullptr);
// else
// lup_<float>(context, &matrix, nullptr, nullptr);
auto offset = shape::getIndexOffset(e, output->shapeInfo());
auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer());
auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset;
// if (matrix.dataType() == input->dataType())
hipLaunchKernelGGL(( determinantKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, inputBuf, outputBuf, n);
// else
// determinantKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf,
// outputBuf, n);
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status determinant(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return determinant_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
template <typename T>
sd::Status logAbsDeterminant_(LaunchContext *context, NDArray *input, NDArray *output) {
sd::LongType n = input->sizeAt(-1);
sd::LongType n2 = n * n;
  std::vector<int> dims;
auto packX =
ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
// auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
DataType dtype = input->dataType();
if (dtype != DataType::DOUBLE) dtype = DataType::FLOAT32;
auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, dtype, context); //, block.getWorkspace());
auto det = NDArrayFactory::create<T>(1, context);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input});
dim3 launchDims(256, 256, 1024);
output->assign(0.f);
for (int e = 0; e < output->lengthOf(); e++) {
sd::LongType pos = e * n2;
// if (matrix.dataType() == input->dataType())
hipLaunchKernelGGL(( fillMatrix<T, T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n);
// else
// fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z,
// *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(),
// input->special(), pos, n);
// if (matrix.dataType() == input->dataType())
lup_<T, int>(context, &matrix, nullptr, nullptr);
// else
// lup_<float>(context, &matrix, nullptr, nullptr);
auto offset = shape::getIndexOffset(e, output->shapeInfo());
auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer());
auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset;
// if (matrix.dataType() == input->dataType())
hipLaunchKernelGGL(( determinantLogKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, inputBuf, outputBuf, n);
// else
// determinantLogKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf,
// outputBuf, n);
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status logAbsDeterminant(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return logAbsDeterminant_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
template <typename T>
static SD_KERNEL void fillLowerUpperKernel(void *lowerBuf, const sd::LongType *lowerShape, void *upperBuf,
const sd::LongType *upperShape, void *matrixBuf,
const sd::LongType *matrixShape, sd::LongType n) {
__shared__ T *lowerMatrix;
__shared__ T *upperMatrix;
__shared__ T *matrix;
if (threadIdx.x == 0) {
lowerMatrix = reinterpret_cast<T *>(lowerBuf);
upperMatrix = reinterpret_cast<T *>(upperBuf);
matrix = reinterpret_cast<T *>(matrixBuf);
}
__syncthreads();
for (int k = blockIdx.x; k < n; k += gridDim.x) { // and then put all values under main diagonal on to it
for (int j = threadIdx.x; j < n; j += blockDim.x) {
sd::LongType posX[] = {k, j};
sd::LongType posD[] = {j, j};
auto xPos = shape::getOffset(lowerShape, posX);
auto yPos = shape::getOffset(upperShape, posX);
auto iPos = shape::getOffset(matrixShape, posX);
auto dPos = shape::getOffset(matrixShape, posD);
if (k >= j)
lowerMatrix[xPos] = matrix[iPos]; //(k, j);
else
upperMatrix[yPos] = matrix[iPos]; // k, j);
}
}
}
template <typename T>
static sd::Status inverse_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
auto n = input->sizeAt(-1);
auto n2 = n * n;
auto dtype = DataTypeUtils::fromT<T>(); // input->dataType();
// if (dtype != DataType::DOUBLE)
// dtype = DataType::FLOAT32;
NDArray matrix = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray upper = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray lower = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray compound = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray permutation = NDArrayFactory::create('c', {n, n}, dtype, context);
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(),
{input->rankOf() - 2, input->rankOf() - 1});
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(),
{output->rankOf() - 2, output->rankOf() - 1});
auto stream = context->getCudaStream();
for (auto i = 0LL; i < packX.numberOfTads(); i++) {
hipLaunchKernelGGL(( fillMatrix<T, T>), dim3(1), dim3(n2), 1024, *stream, matrix.specialBuffer(), matrix.specialShapeInfo(),
input->specialBuffer(), input->specialShapeInfo(), i * n2, n);
matrix.tickWriteDevice();
// compound.assign(matrix);
// if (matrix.dataType() == input->dataType())
lup_<T, int>(context, &matrix, nullptr, nullptr);
hipLaunchKernelGGL(( fillLowerUpperKernel<T>), dim3(n), dim3(n), 1024, *stream, lower.specialBuffer(), lower.specialShapeInfo(),
upper.specialBuffer(), upper.specialShapeInfo(),
matrix.specialBuffer(), matrix.specialShapeInfo(), n);
lower.tickWriteDevice();
upper.tickWriteDevice();
// lower.printIndexedBuffer("LOWER");
// upper.printIndexedBuffer("UPPER");
matrix.assign(0);
invertUpperMatrix(context, &upper, &matrix); // U^{-1}
matrix.tickWriteDevice();
// matrix.printIndexedBuffer("Upper Inverted");
compound.assign(0);
invertLowerMatrix(context, &lower, &compound); // L{-1}
compound.tickWriteDevice();
// compound.printIndexedBuffer("Lower Inverted");
// matrix.tickWriteDevice();
// compound.tickWriteDevice();
sd::MmulHelper::mmul(&matrix, &compound, &upper, 1.0, 0.0);
upper.tickWriteDevice();
// upper.printIndexedBuffer("Full inverted");
hipLaunchKernelGGL(( returnMatrix<T>), dim3(1), dim3(n2), 1024, *stream, output->specialBuffer(), output->specialShapeInfo(),
upper.specialBuffer(), upper.specialShapeInfo(), i * n2, n);
}
return sd::Status::OK;
}
sd::Status inverse(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return inverse_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
bool checkCholeskyInput(sd::LaunchContext *context, NDArray const *input) { return true; }
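// gathers per-matrix device pointers (base buffer + TAD offsets) for the batched potrf (Cholesky) solver call below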
template <typename F>
SD_KERNEL void fillBatchKernel(F **dArrayBatch, F *buf, const sd::LongType *offsets, sd::LongType batchSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < batchSize; i += step) {
dArrayBatch[i] = buf + offsets[i];
}
}
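// zeroes the strictly upper-triangular part of every factorized matrix so that only the triangular Cholesky
// factor is kept in the result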
template <typename F>
SD_KERNEL void adjustResultsKernel(F *dArray, const sd::LongType *shape, const sd::LongType *offsets,
sd::LongType batchSize, sd::LongType n) {
// auto i = blockIdx.x * blockDim.x + threadIdx.x;
sd::LongType *shapeOf = shape::shapeOf(shape);
sd::LongType *strideOf = shape::stride(shape);
for (auto i = blockIdx.x; i < batchSize; i += gridDim.x) {
auto current = dArray + offsets[i];
for (auto r = threadIdx.x; r < n; r += blockDim.x) {
for (auto c = r + 1; c < n; c++) {
sd::LongType posRC[] = {r, c};
auto pos = r * n + c; // shape::getOffset(0, shapeOf, strideOf, posRC, 2);
current[pos] = 0.;
}
}
}
}
template <typename F>
sd::Status cholesky__(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
if (!inplace) output->assign(input);
auto tempOutput = output->dup();
hipsolverDnHandle_t handle = nullptr;
auto n = input->sizeAt(-1);
auto n2 = n * n;
NDArray::prepareSpecialUse({output}, {input});
auto status = hipsolverDnCreate(&handle);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cannot create solver handle", status);
}
F **dArrayBatch = nullptr;
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(
tempOutput.shapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1});
const sd::LongType batchSize = packX.numberOfTads();
int *dInfoArray = nullptr;
auto err = hipMalloc((void **)&dArrayBatch, sizeof(F *) * batchSize);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver batch data buffer", err);
}
err = hipMalloc((void **)&dInfoArray, sizeof(int) * batchSize);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err);
}
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( fillBatchKernel<F>), dim3(1), dim3(batchSize), 128, *stream, dArrayBatch, reinterpret_cast<F *>(tempOutput.specialBuffer()),
packX.specialOffsets(), batchSize);
status = hipsolverDnSetStream(handle, *stream);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cannot set stream to solver handle", status);
}
const hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
if (input->dataType() == DataType::DOUBLE)
status = hipsolverDnDpotrfBatched(handle, uplo, n, (double **)dArrayBatch, n, dInfoArray, batchSize);
else
status = hipsolverDnSpotrfBatched(handle, uplo, n, (float **)dArrayBatch, n, dInfoArray, batchSize);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cholesky factorization failed for batch", status);
}
hipLaunchKernelGGL(( adjustResultsKernel<F>), dim3(batchSize), dim3(n2), 128, *stream, reinterpret_cast<F *>(tempOutput.specialBuffer()),
packX.specialShapeInfo(), packX.specialOffsets(), batchSize,
n);
err = hipFree(dArrayBatch);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot deallocate memory for solver batch data buffer", err);
}
err = hipFree(dInfoArray);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err);
}
if (!inplace)
output->assign(tempOutput);
else
input->assign(tempOutput);
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
// template <typename T>
sd::Status cholesky_(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
NDArray::prepareSpecialUse({output}, {input});
if (input->dataType() == DataType::DOUBLE)
cholesky__<double>(context, input, output, inplace);
else if (input->dataType() == DataType::FLOAT32)
cholesky__<float>(context, input, output, inplace);
else {
std::unique_ptr<NDArray> tempOutput(
NDArrayFactory::create_('c', input->getShapeAsVector(), DataType::FLOAT32, context));
tempOutput->assign(input);
cholesky__<float>(context, tempOutput.get(), tempOutput.get(), true);
output->assign(tempOutput.get());
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status cholesky(sd::LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
// BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (context, input, output, inplace),
// SD_FLOAT_TYPES);
return cholesky_(context, input, output, inplace);
}
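// Usage sketch (illustrative only, variable names are assumptions): for a positive-definite NDArray A of shape
// {n, n}, NDArray L(A); helpers::cholesky(context, &A, &L, false); fills L with the triangular Cholesky factor;
// non-float inputs are routed through a FLOAT32 temporary as shown in cholesky_ above.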
// BUILD_SINGLE_TEMPLATE(template sd::Status cholesky_, (LaunchContext* context, NDArray* input, NDArray* output,
// bool inplace), SD_FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template sd::Status inverse_, (sd::LaunchContext * context, NDArray *input, NDArray *output),
SD_FLOAT_NATIVE);
template <typename T>
SD_KERNEL void logDetKernel(const T *inputBuf, const sd::LongType *inputShape, sd::LongType batchNum,
const sd::LongType *tadShape, const sd::LongType *tadOffsets, T *outputBuf,
const sd::LongType *outputShape) {
__shared__ int n;
if (threadIdx.x == 0) {
n = shape::sizeAt(inputShape, -1); // * shape::sizeAt(inputShape, -1);
}
__syncthreads();
auto output = outputBuf;
auto input = inputBuf;
for (auto i = blockIdx.x; i < batchNum; i += gridDim.x) {
auto current = input + tadOffsets[i];
auto zIndex = shape::getIndexOffset(i, outputShape);
for (auto e = threadIdx.x; e < n; e += blockDim.x) {
sd::LongType diag[] = {e, e};
auto xIndex = shape::getOffset(tadShape, diag);
math::atomics::sd_atomicAdd(&output[zIndex], math::sd_log<T, T>(current[xIndex] * current[xIndex]));
}
}
}
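// log-determinant via Cholesky: for an SPD input A = L * L^T, log(det(A)) = sum_i log(L_ii ^ 2), which is exactly
// what logDetKernel above accumulates over the diagonal of each factorized TAD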
template <typename T>
sd::Status logdetFunctor_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
auto n2 = input->sizeAt(-1) * input->sizeAt(-2);
auto stream = context->getCudaStream();
NDArray tempOutput(*input);
cholesky(context, input, &tempOutput, false);
auto outputBuf = output->dataBuffer()
->specialAsT<T>(); // reinterpret_cast<T*>(output->specialBuffer()); // + e * n2; // + e * n2;
auto inputBuf = tempOutput.dataBuffer()->specialAsT<T>(); // reinterpret_cast<T*>(tempOutput.specialBuffer());
output->nullify();
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(
tempOutput.shapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1});
hipLaunchKernelGGL(( logDetKernel<T>), dim3(128), dim3(512), 256, *stream, inputBuf, tempOutput.specialShapeInfo(), packX.numberOfTads(),
packX.specialShapeInfo(), packX.specialOffsets(), outputBuf,
output->specialShapeInfo());
output->tickWriteDevice();
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status logdetFunctor(sd::LaunchContext *context, NDArray *input, NDArray *output) {
BUILD_SINGLE_SELECTOR(output->dataType(), return logdetFunctor_, (context, input, output), SD_FLOAT_NATIVE);
}
/*
* lup - batched input, batched outputs
* */
sd::Status lup(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) {
BUILD_DOUBLE_SELECTOR(input->dataType(), permutation->dataType(), lup_, (context, input, compound, permutation),
SD_FLOAT_NATIVE, SD_INDEXING_TYPES);
return sd::Status::OK;
}
// BUILD_SINGLE_TEMPLATE(template sd::Status logdetFunctor_,
// (sd::LaunchContext * context, NDArray * input, NDArray * output), SD_FLOAT_NATIVE);
} // namespace helpers
} // namespace ops
} // namespace sd
|
1a7f93455af1ed629006dd45d5e43b4b6dcdbb91.cu
|
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <array/NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/ShapeUtils.h>
#include <ops/declarable/helpers/top_k.h>
//#include <ops/declarable/generic/helpers/BroadcastHelper.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
namespace sd {
namespace ops {
namespace helpers {
// ------------------------------------------------------------------------------------------------------------------ //
// invert the second diagonal for lower diagonal matrix
template <typename T>
static SD_KERNEL void invertKernelLow(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start + 1; i < n; i += step) {
sd::LongType pos[] = {i, i - 1};
sd::LongType posX[] = {i, i};
sd::LongType posY[] = {i - 1, i - 1};
auto xIndex = shape::getOffset(inputShape, pos);
auto dxIndex = shape::getOffset(inputShape, posX);
auto dyIndex = shape::getOffset(inputShape, posY);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert lower triangular matrix
inverted[zIndex] = -input[xIndex] / (input[dxIndex] * input[dyIndex]);
// math::atomics::sd_atomicAdd(&inverted[zIndex], - input[xIndex] * inverted[iIndex] / input[dIndex]);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// invert diagonal vals to upper diagonal matrix
template <typename T>
static SD_KERNEL void upvertKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < n; i += step) {
sd::LongType pos[] = {i, i};
auto xIndex = shape::getOffset(inputShape, pos);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert diagonal elements
inverted[zIndex] /= input[xIndex];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// invert upper second diagonal
template <typename T>
static SD_KERNEL void upvertKernelUp(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
__shared__ T *inverted;
__shared__ const T *input;
if (threadIdx.x == 0) {
inverted = reinterpret_cast<T *>(invertedBuf);
input = reinterpret_cast<const T *>(inputBuf);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < n - 1; i += step) {
sd::LongType pos[] = {i, i + 1};
sd::LongType posX[] = {i + 1, i + 1};
auto xIndex = shape::getOffset(inputShape, pos);
auto iIndex = shape::getOffset(invertedShape, posX);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert upper matrix
math::atomics::sd_atomicAdd(&inverted[zIndex], -input[xIndex] * inverted[iIndex]); // / input[yIndex]);
// inputMatrix->t<T>(i, i + 1) * invertedMatrix->t<T>(i + 1, i + 1) / inputMatrix->t<T>(i, i)
}
}
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
static SD_KERNEL void invertLowKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto input = reinterpret_cast<const T *>(inputBuf);
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (int i = tid + 2; i < n; i += step) {
for (int j = i - 2; j >= 0; --j)
for (int k = 0; k < i; k++) {
sd::LongType posZ[] = {i, j};
sd::LongType posY[] = {k, j};
sd::LongType posX[] = {i, k};
sd::LongType posD[] = {i, i};
auto xIndex = shape::getOffset(inputShape, posX);
auto yIndex = shape::getOffset(invertedShape, posY);
auto dIndex = shape::getOffset(inputShape, posD);
auto zIndex = shape::getOffset(invertedShape, posZ);
// invert non-diagonal elements
math::atomics::sd_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex] / input[dIndex]);
}
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Inversion of upper-triangular matrix non-diagonal elements when the main and second diagonals are already processed
template <typename T>
static SD_KERNEL void invertUpKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
  auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = (int)n - tid - 2; i >= 0; i -= step) {
for (int j = i + 2; j < (int)n; j++)
for (int k = i; k < (int)n; k++) {
sd::LongType posZ[] = {i, j};
sd::LongType posY[] = {k, j};
sd::LongType posX[] = {i, k};
      // inversion with Jordan-Gauss transformation
auto xIndex = shape::getOffset(inputShape, posX);
auto yIndex = shape::getOffset(invertedShape, posY);
auto zIndex = shape::getOffset(invertedShape, posZ);
// invert upper non-diagonal elements
math::atomics::sd_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]);
}
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// procedure to invert a lower-triangular matrix.
// In the current case the lower-triangular matrix has a main diagonal with general values
//
template <typename T>
static void invertLowerMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
int n = inputMatrix->rows();
invertedMatrix->setIdentity();
if (inputMatrix->isIdentityMatrix()) return;
auto stream = context->getCudaStream();
// invert lower matrix
// invert main diagonal
upvertKernel<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert the second diagonal
invertKernelLow<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert non-diagonal elements
invertLowKernel<T><<<n, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
}
// ------------------------------------------------------------------------------------------------------------------ //
// caller for invert lower matrix routine
void invertLowerMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix});
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (context, inputMatrix, invertedMatrix),
SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix});
}
// ------------------------------------------------------------------------------------------------------------------ //
// procedure to invert an upper-triangular matrix.
// In the current case the upper-triangular matrix has all ones on its main diagonal.
template <typename T>
static void invertUpperMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
int n = inputMatrix->rows();
invertedMatrix->setIdentity();
auto stream = context->getCudaStream();
if (inputMatrix->isIdentityMatrix()) { // the inverse for I is I
return;
}
// invert upper matrix
// invert the second diagonal
upvertKernelUp<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert other elements
invertUpKernel<T><<<n, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
}
// ------------------------------------------------------------------------------------------------------------------ //
// inversion of upper triangular matrix - runner routine
void invertUpperMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix});
BUILD_SINGLE_SELECTOR(invertedMatrix->dataType(), invertUpperMatrix_, (context, inputMatrix, invertedMatrix),
SD_FLOAT_NATIVE);
  NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix});
}
// ------------------------------------------------------------------------------------------------------------------ //
// determinant kernel - accumulation product of all values on the main diagonal
template <typename T>
static SD_KERNEL void determinantKernel(T *compound, T *result, sd::LongType len) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
auto pos = i * len + i; // shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
// multiply all diagonal elements
math::atomics::sd_atomicMul(&result[0], compound[pos]);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// determinant logarithm - accumulated sum of the logarithms of all values on the main diagonal. All values
// passed to the logarithm should be positive
template <typename T>
static SD_KERNEL void determinantLogKernel(T *compound, T *result, sd::LongType len) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
auto pos = i * len + i; // shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
// sum logs of all diagonal elements
math::atomics::sd_atomicAdd(result, math::sd_log<T, T>(math::sd_abs(compound[pos])));
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// kernel to copy matrix with given shape to compound tensor with given pos
// output - a N-D tensor buffer with rank not less than 2, input - 2D square n x n matrix with n = rowLen
template <typename T, typename F>
static SD_KERNEL void fillMatrix(void *output, const sd::LongType *outShape, const void *input,
const sd::LongType *inputShape, sd::LongType pos, sd::LongType rowLen) {
__shared__ F *matrix;
__shared__ const T *inputBuf;
__shared__ sd::LongType inputLen;
__shared__ sd::LongType n2;
if (threadIdx.x == 0) {
matrix = reinterpret_cast<F *>(output);
inputBuf = reinterpret_cast<const T *>(input);
inputLen = shape::length(inputShape);
n2 = rowLen * rowLen;
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int k = pos + start, j = start; j < n2; k += step, j += step) {
auto xIndex = shape::getIndexOffset(k, inputShape);
matrix[j] = (F)inputBuf[xIndex];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// same as above, but without type conversion
template <typename T>
static SD_KERNEL void returnMatrix(void *output, const sd::LongType *outputShape, const void *input,
const sd::LongType *inputShape, sd::LongType pos, sd::LongType rowLen) {
__shared__ sd::LongType outputLen;
__shared__ sd::LongType n2;
auto matrix = reinterpret_cast<const T *>(input);
auto outputBuf = reinterpret_cast<T *>(output);
if (threadIdx.x == 0) {
outputLen = shape::length(inputShape);
n2 = rowLen * rowLen;
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int k = pos + start, j = start; j < n2; k += step, j += step) {
auto zIndex = shape::getIndexOffset(k, outputShape);
outputBuf[zIndex] = matrix[j];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// fill up permutation matrix kernel. The permutation matrix is filled with zeros and ones
template <typename F>
static SD_KERNEL void fillUpPermutation(void *output, const sd::LongType *shape, int *source, int rowNum) {
F *permutation = reinterpret_cast<F *>(output);
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < rowNum; i += step) {
int val = source[i] - 1;
sd::LongType posF[] = {i, val};
auto pos = shape::getOffset(shape, posF);
permutation[pos] = F(1.f);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// LUP decomposition runner - using CUBLAS SOLVER
// if a permutation is given, LUP decomposition is used, otherwise plain LU decomposition
// L - lower triangular, U - upper triangular, P - permutation matrices
// PA = LU
//
// input - A matrix nxn
// compound - C matrix L + U - I, or main diagonal and lower - L matrix, from the 2nd diagonal - U matrix
template <typename T, typename I>
static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) {
auto stream = context->getCudaStream();
auto n = input->rows();
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
cusolverDnHandle_t *cusolverH = (cusolverDnHandle_t *)context->getCusolverHandle(); // nullptr;
// create solver handle
cusolverStatus_t status; // cusolverDnCreate(&cusolverH);
// if (CUSOLVER_STATUS_SUCCESS != status) {
// throw cuda_exception::build("Cannot create cuSolver handle", status);
// }
// set solver stream
status = cusolverDnSetStream(*cusolverH, *stream);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("Cannot set up stream for cuda solver", status);
}
int lwork = 0;
int *d_info = nullptr;
// allocate memory for permutation vector
auto err = cudaMalloc((void **)&d_info, sizeof(int));
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err);
}
DataType dtype = input->dataType();
switch (dtype) { // there are two implementations with cublas for LUP decomposition - double and float
case DataType::DOUBLE: {
double *d_work = nullptr;
// compute internal buffer size
double *matrix = reinterpret_cast<double *>(input->specialBuffer());
status = cusolverDnDgetrf_bufferSize(*cusolverH, n, n, matrix, n, &lwork);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status);
}
      err = cudaMalloc((void **)&d_work, sizeof(double) * lwork);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err);
}
if (permutation == nullptr) {
status = cusolverDnDgetrf(*cusolverH, n, n, matrix, n, d_work, nullptr, d_info);
if (status != CUSOLVER_STATUS_SUCCESS) {
throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status);
}
} else {
NDArray permutVector('c', {n}, sd::DataType::INT32, context);
int *permutationBuf = permutVector.dataBuffer()->specialAsT<int>();
status = cusolverDnDgetrf(*cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info);
if (status != CUSOLVER_STATUS_SUCCESS) {
throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status);
}
if (permutation->rankOf() == 2) {
fillUpPermutation<double><<<n, n, 1024, *stream>>>(permutation->specialBuffer(),
permutation->specialShapeInfo(), permutationBuf, n);
} else {
permutVector.tickWriteDevice();
input->tickWriteDevice();
compound->assign(input);
permutation->assign(permutVector);
}
}
err = cudaFree(d_work);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err);
}
} break;
case DataType::FLOAT32: {
float *matrix = reinterpret_cast<float *>(input->specialBuffer());
float *d_work = nullptr;
status = cusolverDnSgetrf_bufferSize(*cusolverH, n, n, matrix, n, &lwork);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status);
}
err = cudaMalloc((void **)&d_work, sizeof(float) * lwork);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err);
}
if (permutation == nullptr)
status = cusolverDnSgetrf(*cusolverH, n, n, matrix, n, d_work, nullptr, d_info);
else {
NDArray permutVector('c', {n}, DataType::INT32, context);
int *permutationBuf = reinterpret_cast<int *>(permutVector.specialBuffer());
status = cusolverDnSgetrf(*cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info);
if (permutation->rankOf() == 2) {
fillUpPermutation<I><<<n, n, 128, *stream>>>(permutation->specialBuffer(), permutation->specialShapeInfo(),
permutationBuf, n);
permutation->tickWriteDevice();
} else {
input->tickWriteDevice();
compound->assign(input);
permutation->assign(permutVector);
}
}
err = cudaFree(d_work);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err);
}
}
}
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status);
}
err = cudaFree(d_info);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err);
}
// cusolverDnDestroy(cusolverH);
// NDArray::registerSpecialUse({input}, {input});
input->tickWriteDevice();
}
// ------------------------------------------------------------------------------------------------------------------ //
BUILD_DOUBLE_TEMPLATE(template void lup_,
(LaunchContext * context, NDArray *input, NDArray *output, NDArray *permutation), SD_FLOAT_NATIVE,
SD_INDEXING_TYPES);
template <typename T>
static SD_DEVICE void swapRows(T *matrix, const sd::LongType *shape, sd::LongType theFirst, sd::LongType theSecond,
sd::LongType n) {
if (theFirst != theSecond) {
for (auto i = 0; i < n; i++) {
sd::LongType theFirstPos[] = {theFirst, i};
sd::LongType theSecondPos[] = {theSecond, i};
auto theFirstIndex = shape::getOffset(shape, theFirstPos, 0);
auto theSecondIndex = shape::getOffset(shape, theSecondPos, 0);
math::sd_swap(matrix[theFirstIndex], matrix[theSecondIndex]);
}
}
}
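// eliminates one column of the compound buffer below the pivot: divides the sub-column by the pivot value and
// updates the trailing submatrix (a single Gaussian-elimination step of the in-place LU factorization)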
template <typename T>
static SD_DEVICE void processColumns(sd::LongType currentRow, sd::LongType rowNum, T *compoundBuf,
const sd::LongType *compoundShape) {
sd::LongType xDiag[] = {currentRow, currentRow};
auto diagIndex = shape::getOffset(compoundShape, xDiag, 0);
for (auto j = currentRow + 1; j < rowNum; j++) {
sd::LongType xRow[] = {j, currentRow};
auto rowIndex = shape::getOffset(compoundShape, xRow, 0);
compoundBuf[rowIndex] /= compoundBuf[diagIndex]; // output->t<T>(i, i);
for (auto k = currentRow + 1; k < rowNum; k++) {
sd::LongType yRow[] = {j, k};
sd::LongType yCol[] = {currentRow, k};
auto rowIndexY = shape::getOffset(compoundShape, yRow, 0);
auto colIndex = shape::getOffset(compoundShape, yCol, 0);
compoundBuf[rowIndexY] -= compoundBuf[rowIndex] * compoundBuf[colIndex];
}
}
}
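// partial-pivoting helper: returns the row index, at or below the given column, holding the maximum absolute
// value in that column, or -1 when the whole sub-column is zero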
template <typename T>
SD_DEVICE sd::LongType argmaxCol(sd::LongType column, T *compoundBuffer, const sd::LongType *compoundShape) {
auto rowNum = shape::sizeAt(compoundShape, 0);
sd::LongType xInitial[] = {column, column};
auto xInitialIndex = shape::getOffset(compoundShape, xInitial, 0);
auto maxValue = T(0); // sd::math::sd_abs(compoundBuffer[xInitialIndex]);
auto result = -1LL;
for (auto rowCounter = column; rowCounter < rowNum; rowCounter++) {
sd::LongType xPos[] = {rowCounter, column};
auto xIndex = shape::getOffset(compoundShape, xPos, 0);
if (sd::math::sd_abs(compoundBuffer[xIndex]) > maxValue) {
maxValue = sd::math::sd_max(maxValue, sd::math::sd_abs(compoundBuffer[xIndex]));
result = rowCounter;
}
}
return result;
}
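// luNN factorizes a single n x n matrix in place using LU with partial pivoting: the L multipliers
// are stored below the diagonal, U on and above it, and the applied row swaps are recorded in
// `permutation`. Returns -1 if a singular pivot column is encountered, 0 otherwise.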
template <typename T, typename I>
static SD_DEVICE int luNN(T *matrix, const sd::LongType *shape, I *permutation, const sd::LongType *permuShape,
sd::LongType n) {
for (auto i = 0; i < n - 1; i++) {
auto pivotIndex = argmaxCol(i, matrix, shape);
if (pivotIndex < 0) {
return -1; // throw std::runtime_error("helpers::luNN_: input matrix is singular.");
}
math::sd_swap(permutation[shape::getIndexOffset(i, permuShape)],
permutation[shape::getIndexOffset(pivotIndex, permuShape)]);
swapRows(matrix, shape, (sd::LongType)i, pivotIndex, n);
processColumns(i, n, matrix, shape);
}
return 0;
}
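// luBatchedKernel walks the batch with a grid-stride loop; each thread factorizes one TAD (matrix)
// in place via luNN together with its own permutation vector, and stops early if its matrix turns
// out to be singular.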
template <typename T, typename I>
static SD_KERNEL void luBatchedKernel(T *outputBuf, const sd::LongType *outputShape, I *permutations,
const sd::LongType *permuShape, const sd::LongType *outputTadShape,
const sd::LongType *outputTadOffsets, const sd::LongType *permuTadShape,
const sd::LongType *permuTadOffsets, sd::LongType batchNum) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto b = start; b < batchNum; b += step) {
T *matrix = outputBuf + outputTadOffsets[b];
I *permutation = permutations + permuTadOffsets[b];
if (0 != luNN(matrix, outputTadShape, permutation, permuTadShape, shape::length(permuTadShape))) break;
}
}
template <typename T, typename I>
static void lu_(LaunchContext *context, NDArray *input, NDArray *output, NDArray *permutationVectors) {
auto n = input->sizeAt(-1);
auto stream = context->getCudaStream();
NDArray iota('c', {n}, permutationVectors->dataType(), context); // = NDArrayFactory::create(); // <int>('c', {n});
iota.linspace(0);
iota.syncToDevice();
output->assign(input); // copy the input; the LU factorization is then computed in place on output
// output->tickWriteDevice();
permutationVectors->applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), iota, *permutationVectors, true, nullptr);
// permutationVectors->tickWriteDevice();
auto tads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto permutaionTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-1});
auto batchNum = tads.numberOfTads();
luBatchedKernel<T, I><<<batchNum, 256, 1024, *stream>>>(
reinterpret_cast<T *>(output->platformBuffer()), output->specialShapeInfo(),
reinterpret_cast<I *>(permutationVectors->platformBuffer()), permutationVectors->specialShapeInfo(),
tads.specialShapeInfo(), tads.specialOffsets(), permutaionTads.specialShapeInfo(),
permutaionTads.specialOffsets(), batchNum);
}
void lu(LaunchContext *context, NDArray *input, NDArray *output, NDArray *permutations) {
NDArray::prepareSpecialUse({output, permutations}, {input});
BUILD_DOUBLE_SELECTOR(input->dataType(), permutations->dataType(), lu_, (context, input, output, permutations),
SD_FLOAT_NATIVE, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output, permutations}, {input});
}
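// A minimal host-side usage sketch for the batched LU entry point above (names and shapes are
// illustrative only and assume an initialized LaunchContext `ctx`; not taken from the library's tests):
//   NDArray in('c', {batch, n, n}, sd::DataType::FLOAT32, ctx);
//   NDArray out(in);                                            // receives the packed L\U factors
//   NDArray perms('c', {batch, n}, sd::DataType::INT32, ctx);   // per-matrix row permutations
//   sd::ops::helpers::lu(ctx, &in, &out, &perms);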
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
static sd::Status determinant_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
sd::LongType n = input->sizeAt(-1);
sd::LongType n2 = n * n;
std::vector<int> dims;
auto packX =
ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
// auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
// DataType dtype = input->dataType();
// if (dtype != DataType::DOUBLE)
// dtype = DataType::FLOAT32;
auto matrix =
NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT<T>(), context); //, block.getWorkspace());
auto det = NDArrayFactory::create<T>(1, context);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input});
dim3 launchDims(256, 256, 1024);
output->assign(1.f);
for (int e = 0; e < output->lengthOf(); e++) {
sd::LongType pos = e * n2;
// if (matrix.dataType() == input->dataType())
fillMatrix<T, T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n);
// else
// fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z,
// *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(),
// input->special(), pos, n);
lup_<T, int>(context, &matrix, nullptr, nullptr);
// else
// lup_<float>(context, &matrix, nullptr, nullptr);
auto offset = shape::getIndexOffset(e, output->shapeInfo());
auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer());
auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset;
// if (matrix.dataType() == input->dataType())
determinantKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuf, outputBuf, n);
// else
// determinantKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf,
// outputBuf, n);
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status determinant(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return determinant_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
template <typename T>
sd::Status logAbsDeterminant_(LaunchContext *context, NDArray *input, NDArray *output) {
sd::LongType n = input->sizeAt(-1);
sd::LongType n2 = n * n;
std::vector<int> dims;
auto packX =
ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
// auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
DataType dtype = input->dataType();
if (dtype != DataType::DOUBLE) dtype = DataType::FLOAT32;
auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, dtype, context); //, block.getWorkspace());
auto det = NDArrayFactory::create<T>(1, context);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input});
dim3 launchDims(256, 256, 1024);
output->assign(0.f);
for (int e = 0; e < output->lengthOf(); e++) {
sd::LongType pos = e * n2;
// if (matrix.dataType() == input->dataType())
fillMatrix<T, T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n);
// else
// fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z,
// *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(),
// input->special(), pos, n);
// if (matrix.dataType() == input->dataType())
lup_<T, int>(context, &matrix, nullptr, nullptr);
// else
// lup_<float>(context, &matrix, nullptr, nullptr);
auto offset = shape::getIndexOffset(e, output->shapeInfo());
auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer());
auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset;
// if (matrix.dataType() == input->dataType())
determinantLogKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuf, outputBuf, n);
// else
// determinantLogKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf,
// outputBuf, n);
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status logAbsDeterminant(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return logAbsDeterminant_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
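// fillLowerUpperKernel splits the packed LU matrix produced by lup_: entries on or below the main
// diagonal are copied into `lower`, entries strictly above it into `upper`; positions that are not
// written keep whatever values the destination buffers already held.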
template <typename T>
static SD_KERNEL void fillLowerUpperKernel(void *lowerBuf, const sd::LongType *lowerShape, void *upperBuf,
const sd::LongType *upperShape, void *matrixBuf,
const sd::LongType *matrixShape, sd::LongType n) {
__shared__ T *lowerMatrix;
__shared__ T *upperMatrix;
__shared__ T *matrix;
if (threadIdx.x == 0) {
lowerMatrix = reinterpret_cast<T *>(lowerBuf);
upperMatrix = reinterpret_cast<T *>(upperBuf);
matrix = reinterpret_cast<T *>(matrixBuf);
}
__syncthreads();
for (int k = blockIdx.x; k < n; k += gridDim.x) { // and then put all values under main diagonal on to it
for (int j = threadIdx.x; j < n; j += blockDim.x) {
sd::LongType posX[] = {k, j};
sd::LongType posD[] = {j, j};
auto xPos = shape::getOffset(lowerShape, posX);
auto yPos = shape::getOffset(upperShape, posX);
auto iPos = shape::getOffset(matrixShape, posX);
auto dPos = shape::getOffset(matrixShape, posD);
if (k >= j)
lowerMatrix[xPos] = matrix[iPos]; //(k, j);
else
upperMatrix[yPos] = matrix[iPos]; // k, j);
}
}
}
template <typename T>
static sd::Status inverse_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
auto n = input->sizeAt(-1);
auto n2 = n * n;
auto dtype = DataTypeUtils::fromT<T>(); // input->dataType();
// if (dtype != DataType::DOUBLE)
// dtype = DataType::FLOAT32;
NDArray matrix = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray upper = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray lower = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray compound = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray permutation = NDArrayFactory::create('c', {n, n}, dtype, context);
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(),
{input->rankOf() - 2, input->rankOf() - 1});
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(),
{output->rankOf() - 2, output->rankOf() - 1});
auto stream = context->getCudaStream();
for (auto i = 0LL; i < packX.numberOfTads(); i++) {
fillMatrix<T, T><<<1, n2, 1024, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(),
input->specialBuffer(), input->specialShapeInfo(), i * n2, n);
matrix.tickWriteDevice();
// compound.assign(matrix);
// if (matrix.dataType() == input->dataType())
lup_<T, int>(context, &matrix, nullptr, nullptr);
fillLowerUpperKernel<T><<<n, n, 1024, *stream>>>(lower.specialBuffer(), lower.specialShapeInfo(),
upper.specialBuffer(), upper.specialShapeInfo(),
matrix.specialBuffer(), matrix.specialShapeInfo(), n);
lower.tickWriteDevice();
upper.tickWriteDevice();
// lower.printIndexedBuffer("LOWER");
// upper.printIndexedBuffer("UPPER");
matrix.assign(0);
invertUpperMatrix(context, &upper, &matrix); // U^{-1}
matrix.tickWriteDevice();
// matrix.printIndexedBuffer("Upper Inverted");
compound.assign(0);
invertLowerMatrix(context, &lower, &compound); // L{-1}
compound.tickWriteDevice();
// compound.printIndexedBuffer("Lower Inverted");
// matrix.tickWriteDevice();
// compound.tickWriteDevice();
sd::MmulHelper::mmul(&matrix, &compound, &upper, 1.0, 0.0);
upper.tickWriteDevice();
// upper.printIndexedBuffer("Full inverted");
returnMatrix<T><<<1, n2, 1024, *stream>>>(output->specialBuffer(), output->specialShapeInfo(),
upper.specialBuffer(), upper.specialShapeInfo(), i * n2, n);
}
return sd::Status::OK;
}
sd::Status inverse(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return inverse_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
bool checkCholeskyInput(sd::LaunchContext *context, NDArray const *input) { return true; }
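// fillBatchKernel builds the array of per-matrix device pointers (buf + offsets[i]) expected by
// the batched cuSOLVER potrf call below.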
template <typename F>
SD_KERNEL void fillBatchKernel(F **dArrayBatch, F *buf, const sd::LongType *offsets, sd::LongType batchSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < batchSize; i += step) {
dArrayBatch[i] = buf + offsets[i];
}
}
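// adjustResultsKernel zeroes, for every matrix in the batch, the entries at row-major offset
// r * n + c with c > r, so that only one triangle of the potrf result is kept.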
template <typename F>
SD_KERNEL void adjustResultsKernel(F *dArray, const sd::LongType *shape, const sd::LongType *offsets,
sd::LongType batchSize, sd::LongType n) {
// auto i = blockIdx.x * blockDim.x + threadIdx.x;
sd::LongType *shapeOf = shape::shapeOf(shape);
sd::LongType *strideOf = shape::stride(shape);
for (auto i = blockIdx.x; i < batchSize; i += gridDim.x) {
auto current = dArray + offsets[i];
for (auto r = threadIdx.x; r < n; r += blockDim.x) {
for (auto c = r + 1; c < n; c++) {
sd::LongType posRC[] = {r, c};
auto pos = r * n + c; // shape::getOffset(0, shapeOf, strideOf, posRC, 2);
current[pos] = 0.;
}
}
}
}
template <typename F>
sd::Status cholesky__(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
if (!inplace) output->assign(input);
auto tempOutput = output->dup();
cusolverDnHandle_t handle = nullptr;
auto n = input->sizeAt(-1);
auto n2 = n * n;
NDArray::prepareSpecialUse({output}, {input});
auto status = cusolverDnCreate(&handle);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cannot create solver handle", status);
}
F **dArrayBatch = nullptr;
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(
tempOutput.shapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1});
const sd::LongType batchSize = packX.numberOfTads();
int *dInfoArray = nullptr;
auto err = cudaMalloc((void **)&dArrayBatch, sizeof(F *) * batchSize);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver batch data buffer", err);
}
err = cudaMalloc((void **)&dInfoArray, sizeof(int) * batchSize);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err);
}
auto stream = context->getCudaStream();
fillBatchKernel<F><<<1, batchSize, 128, *stream>>>(dArrayBatch, reinterpret_cast<F *>(tempOutput.specialBuffer()),
packX.specialOffsets(), batchSize);
status = cusolverDnSetStream(handle, *stream);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cannot set stream to solver handle", status);
}
const cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
if (input->dataType() == DataType::DOUBLE)
status = cusolverDnDpotrfBatched(handle, uplo, n, (double **)dArrayBatch, n, dInfoArray, batchSize);
else
status = cusolverDnSpotrfBatched(handle, uplo, n, (float **)dArrayBatch, n, dInfoArray, batchSize);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cholesky factorization failed for batch", status);
}
adjustResultsKernel<F><<<batchSize, n2, 128, *stream>>>(reinterpret_cast<F *>(tempOutput.specialBuffer()),
packX.specialShapeInfo(), packX.specialOffsets(), batchSize,
n);
err = cudaFree(dArrayBatch);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot deallocate memory for solver batch data buffer", err);
}
err = cudaFree(dInfoArray);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err);
}
if (!inplace)
output->assign(tempOutput);
else
input->assign(tempOutput);
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
// template <typename T>
sd::Status cholesky_(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
NDArray::prepareSpecialUse({output}, {input});
if (input->dataType() == DataType::DOUBLE)
cholesky__<double>(context, input, output, inplace);
else if (input->dataType() == DataType::FLOAT32)
cholesky__<float>(context, input, output, inplace);
else {
std::unique_ptr<NDArray> tempOutput(
NDArrayFactory::create_('c', input->getShapeAsVector(), DataType::FLOAT32, context));
tempOutput->assign(input);
cholesky__<float>(context, tempOutput.get(), tempOutput.get(), true);
output->assign(tempOutput.get());
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status cholesky(sd::LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
// BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (context, input, output, inplace),
// SD_FLOAT_TYPES);
return cholesky_(context, input, output, inplace);
}
// BUILD_SINGLE_TEMPLATE(template sd::Status cholesky_, (LaunchContext* context, NDArray* input, NDArray* output,
// bool inplace), SD_FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template sd::Status inverse_, (sd::LaunchContext * context, NDArray *input, NDArray *output),
SD_FLOAT_NATIVE);
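// logDetKernel accumulates sum_i log(d_i * d_i) over the diagonal entries d_i of each batched
// matrix. As used by logdetFunctor_ below, the input is a triangular Cholesky factor of A, whose
// diagonal satisfies det(A) = prod_i d_i^2, so the accumulated value equals log|det A|.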
template <typename T>
SD_KERNEL void logDetKernel(const T *inputBuf, const sd::LongType *inputShape, sd::LongType batchNum,
const sd::LongType *tadShape, const sd::LongType *tadOffsets, T *outputBuf,
const sd::LongType *outputShape) {
__shared__ int n;
if (threadIdx.x == 0) {
n = shape::sizeAt(inputShape, -1); // * shape::sizeAt(inputShape, -1);
}
__syncthreads();
auto output = outputBuf;
auto input = inputBuf;
for (auto i = blockIdx.x; i < batchNum; i += gridDim.x) {
auto current = input + tadOffsets[i];
auto zIndex = shape::getIndexOffset(i, outputShape);
for (auto e = threadIdx.x; e < n; e += blockDim.x) {
sd::LongType diag[] = {e, e};
auto xIndex = shape::getOffset(tadShape, diag);
math::atomics::sd_atomicAdd(&output[zIndex], math::sd_log<T, T>(current[xIndex] * current[xIndex]));
}
}
}
template <typename T>
sd::Status logdetFunctor_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
auto n2 = input->sizeAt(-1) * input->sizeAt(-2);
auto stream = context->getCudaStream();
NDArray tempOutput(*input);
cholesky(context, input, &tempOutput, false);
auto outputBuf = output->dataBuffer()
->specialAsT<T>(); // reinterpret_cast<T*>(output->specialBuffer()); // + e * n2; // + e * n2;
auto inputBuf = tempOutput.dataBuffer()->specialAsT<T>(); // reinterpret_cast<T*>(tempOutput.specialBuffer());
output->nullify();
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(
tempOutput.shapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1});
logDetKernel<T><<<128, 512, 256, *stream>>>(inputBuf, tempOutput.specialShapeInfo(), packX.numberOfTads(),
packX.specialShapeInfo(), packX.specialOffsets(), outputBuf,
output->specialShapeInfo());
output->tickWriteDevice();
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status logdetFunctor(sd::LaunchContext *context, NDArray *input, NDArray *output) {
BUILD_SINGLE_SELECTOR(output->dataType(), return logdetFunctor_, (context, input, output), SD_FLOAT_NATIVE);
}
/*
* lup - batched input, batched outputs
* */
sd::Status lup(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) {
BUILD_DOUBLE_SELECTOR(input->dataType(), permutation->dataType(), lup_, (context, input, compound, permutation),
SD_FLOAT_NATIVE, SD_INDEXING_TYPES);
return sd::Status::OK;
}
// BUILD_SINGLE_TEMPLATE(template sd::Status logdetFunctor_,
// (sd::LaunchContext * context, NDArray * input, NDArray * output), SD_FLOAT_NATIVE);
} // namespace helpers
} // namespace ops
} // namespace sd
|
f0cd6aab030b375ae75684157cd2d0a95b92ce53.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "blurimagecuda.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__
void blurimageCudaDevice(float * R, float* blurredimage, int w, int h) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int BLUR_SIZE = 20;
if(col < w && row < h){
int pixval = 0;
int pixels = 0;
for(int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE; ++blurrow){
for(int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE; ++blurcol){
int currow = row + blurrow;
int curcol = col + blurcol;
if(currow > -1 && currow < h && curcol > -1 && curcol < w){
pixval += R[currow * w + curcol];
pixels++;
}
}
}
blurredimage[row * w + col] = ((float)pixval/(float)pixels)/400.0;
}
}
void blurimage(float * R, float * G, float * B, float* blurredR, float* blurredB, float* blurredG, int w, int h){
int size = w * h * sizeof(float);
float *d_R, *d_G, *d_B, *d_blurR, *d_blurG, *d_blurB;
hipMalloc((void **) &d_R, size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_G, size);
hipMemcpy(d_G, G, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_blurR, size);
hipMalloc((void **) &d_blurG, size);
hipMalloc((void **) &d_blurB, size);
dim3 dimGrid(ceil((float)w/32.0), ceil((float)h/32.0), 1);
dim3 dimBlock(32,32,1);
hipLaunchKernelGGL(( blurimageCudaDevice), dim3(dimGrid), dim3(dimBlock), 0, 0, d_R, d_blurR, w, h);
hipMemcpy(blurredR, d_blurR, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_blurR);
hipLaunchKernelGGL(( blurimageCudaDevice), dim3(dimGrid), dim3(dimBlock), 0, 0, d_G, d_blurG, w, h);
hipMemcpy(blurredG, d_blurG, size, hipMemcpyDeviceToHost);
hipFree(d_G);
hipFree(d_blurG);
hipLaunchKernelGGL(( blurimageCudaDevice), dim3(dimGrid), dim3(dimBlock), 0, 0, d_B, d_blurB, w, h);
hipMemcpy(blurredB, d_blurB, size, hipMemcpyDeviceToHost);
hipFree(d_B);
hipFree(d_blurB);
}
__global__
void blurimageCudaDevice1(unsigned char * R, unsigned char* blurredimage, int w, int h) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int BLUR_SIZE = 2;
if(col < w && row < h){
int pixval = 0;
int pixels = 0;
for(int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE; ++blurrow){
for(int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE; ++blurcol){
int currow = row + blurrow;
int curcol = col + blurcol;
if(currow > -1 && currow < h && curcol > -1 && curcol < w){
pixval += R[currow * w + curcol];
pixels++;
}
}
}
blurredimage[row * w + col] = (unsigned char)(pixval/pixels);
}
}
void bluruchar(unsigned char* ucharmat, int w, int h, unsigned char* ucharupdated){
int size = w * h * sizeof(unsigned char);
unsigned char *d_R, *d_blurR;
hipMalloc((void **) &d_R, size);
hipMemcpy(d_R, ucharmat, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_blurR, size);
dim3 dimGrid(ceil((float)w/32.0), ceil((float)h/32.0), 1);
dim3 dimBlock(32,32,1);
hipLaunchKernelGGL(( blurimageCudaDevice1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_R, d_blurR, w, h);
hipMemcpy(ucharupdated, d_blurR, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_blurR);
}
|
f0cd6aab030b375ae75684157cd2d0a95b92ce53.cu
|
#include "blurimagecuda.h"
#include <cuda_runtime.h>
#include <cuda.h>
__global__
void blurimageCudaDevice(float * R, float* blurredimage, int w, int h) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int BLUR_SIZE = 20;
if(col < w && row < h){
int pixval = 0;
int pixels = 0;
for(int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE; ++blurrow){
for(int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE; ++blurcol){
int currow = row + blurrow;
int curcol = col + blurcol;
if(currow > -1 && currow < h && curcol > -1 && curcol < w){
pixval += R[currow * w + curcol];
pixels++;
}
}
}
blurredimage[row * w + col] = ((float)pixval/(float)pixels)/400.0;
}
}
void blurimage(float * R, float * G, float * B, float* blurredR, float* blurredB, float* blurredG, int w, int h){
int size = w * h * sizeof(float);
float *d_R, *d_G, *d_B, *d_blurR, *d_blurG, *d_blurB;
cudaMalloc((void **) &d_R, size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_G, size);
cudaMemcpy(d_G, G, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_blurR, size);
cudaMalloc((void **) &d_blurG, size);
cudaMalloc((void **) &d_blurB, size);
dim3 dimGrid(ceil((float)w/32.0), ceil((float)h/32.0), 1);
dim3 dimBlock(32,32,1);
blurimageCudaDevice<<<dimGrid, dimBlock>>>(d_R, d_blurR, w, h);
cudaMemcpy(blurredR, d_blurR, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_blurR);
blurimageCudaDevice<<<dimGrid, dimBlock>>>(d_G, d_blurG, w, h);
cudaMemcpy(blurredG, d_blurG, size, cudaMemcpyDeviceToHost);
cudaFree(d_G);
cudaFree(d_blurG);
blurimageCudaDevice<<<dimGrid, dimBlock>>>(d_B, d_blurB, w, h);
cudaMemcpy(blurredB, d_blurB, size, cudaMemcpyDeviceToHost);
cudaFree(d_B);
cudaFree(d_blurB);
}
__global__
void blurimageCudaDevice1(unsigned char * R, unsigned char* blurredimage, int w, int h) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int BLUR_SIZE = 2;
if(col < w && row < h){
int pixval = 0;
int pixels = 0;
for(int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE; ++blurrow){
for(int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE; ++blurcol){
int currow = row + blurrow;
int curcol = col + blurcol;
if(currow > -1 && currow < h && curcol > -1 && curcol < w){
pixval += R[currow * w + curcol];
pixels++;
}
}
}
blurredimage[row * w + col] = (unsigned char)(pixval/pixels);
}
}
void bluruchar(unsigned char* ucharmat, int w, int h, unsigned char* ucharupdated){
int size = w * h * sizeof(unsigned char);
unsigned char *d_R, *d_blurR;
cudaMalloc((void **) &d_R, size);
cudaMemcpy(d_R, ucharmat, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_blurR, size);
dim3 dimGrid(ceil((float)w/32.0), ceil((float)h/32.0), 1);
dim3 dimBlock(32,32,1);
blurimageCudaDevice1<<<dimGrid, dimBlock>>>(d_R, d_blurR, w, h);
cudaMemcpy(ucharupdated, d_blurR, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_blurR);
}
|
a9975b975f22955ae4ea470159a2eac54bdfdae1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_4_left [3][2];
static int dims_update_halo_kernel2_yvel_plus_4_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_left_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(4,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(4,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[0][0] * dims_update_halo_kernel2_yvel_plus_4_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[1][0] * dims_update_halo_kernel2_yvel_plus_4_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_4_left[0][0], dims_update_halo_kernel2_yvel_plus_4_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_4_left[1][0], dims_update_halo_kernel2_yvel_plus_4_left[1][1], arg1);
update_halo_kernel2_yvel_plus_4_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,39)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
OPS_kernels[39].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_4_left_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_4_left_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_4_left_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_4_left_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_4_left_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_4_left_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_4_left_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_4_left_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_4_left, dims_update_halo_kernel2_yvel_plus_4_left_h, sizeof(dims_update_halo_kernel2_yvel_plus_4_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[39].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 39;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 39;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
a9975b975f22955ae4ea470159a2eac54bdfdae1.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_4_left [3][2];
static int dims_update_halo_kernel2_yvel_plus_4_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_left_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(4,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(4,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[0][0] * dims_update_halo_kernel2_yvel_plus_4_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_left[1][0] * dims_update_halo_kernel2_yvel_plus_4_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_4_left[0][0], dims_update_halo_kernel2_yvel_plus_4_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_4_left[1][0], dims_update_halo_kernel2_yvel_plus_4_left[1][1], arg1);
update_halo_kernel2_yvel_plus_4_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,39)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
OPS_kernels[39].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_4_left_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_4_left_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_4_left_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_4_left_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_4_left_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_4_left_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_4_left_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_4_left_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_4_left, dims_update_halo_kernel2_yvel_plus_4_left_h, sizeof(dims_update_halo_kernel2_yvel_plus_4_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[39].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 39;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 39;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
8f6af2bfadd89954d957de9c107995605b299cfe.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <thrust/device_ptr.h>
#include <thrust/partition.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#define LAMBERT_SHADING 1
#define BLINN_PHONG_SHADING 1
#define BACKFACE_CULLING 1
#define BILINEAR_FILTERING 1
//happens by default now since added check
//#define COLOR_TRIANGLE_INTERPOLATION 1
std::chrono::time_point<std::chrono::high_resolution_clock> clock_now;
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive* dev_primitives = NULL;
static Fragment* dev_fragmentBuffer = NULL;
static glm::vec3* dev_framebuffer = NULL;
static int* dev_depth = NULL; // you might need this buffer when doing depth test
//lights in scene
static glm::vec3* dev_lights = NULL;
const int num_lights = 2;
//array of objects
std::vector<ObjectData> objects;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4* pbo, int w, int h, glm::vec3* image)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment* fragmentBuffer, glm::vec3* framebuffer, glm::vec3* lights, int num_lights, glm::vec3 camera_pos)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
glm::vec3 eye_pos = fragmentBuffer[index].eyePos;
glm::vec3 eye_normal = fragmentBuffer[index].eyeNor;
glm::vec3 pixel_color = fragmentBuffer[index].color;
// TODO: add your fragment shader code here
#ifdef LAMBERT_SHADING
for(int i = 0; i < num_lights; i++)
{
glm::vec3& light_source = lights[i];
glm::vec3 light_direction = glm::normalize(light_source - eye_pos);
float amount_of_light = glm::max(glm::dot(light_direction, eye_normal), 0.0f);
#ifdef BLINN_PHONG_SHADING
glm::vec3 eye_direction = glm::normalize(camera_pos - eye_pos);
glm::vec3 half_direction = glm::normalize(light_direction + eye_direction);
amount_of_light = glm::pow(glm::max(glm::dot(eye_normal, half_direction), 0.0f), 8.0f);
#endif
pixel_color += fragmentBuffer[index].color * amount_of_light;
}
#endif
// hack to get multiple objects to work: only write the pixel if it is still black (does NOT check the depth buffer)
if(framebuffer[index] == glm::vec3(0.0f))
{
framebuffer[index] = pixel_color;
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h)
{
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
hipFree(dev_lights);
hipMalloc(&dev_lights, num_lights * sizeof(glm::vec3));
hipMemset(dev_lights, 0, num_lights * sizeof(glm::vec3));
//init lights here
glm::vec3 cpu_lights[num_lights] =
{
{ 2.0f, 2.0f, 2.0f },
{ -2.0f, 2.0f, 2.0f },
};
hipMemcpy(dev_lights, cpu_lights, num_lights * sizeof(glm::vec3), hipMemcpyHostToDevice);
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int* depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
* kern function with support for stride to sometimes replace hipMemcpy
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset,
int componentTypeByteSize)
{
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N)
{
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++)
{
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal)
{
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices)
{
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node& n)
{
glm::mat4 curMatrix(1.0);
const std::vector<double>& m = n.matrix;
if (m.size() > 0)
{
// matrix, copy it
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
}
else
{
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0)
{
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0)
{
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0)
{
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode(
std::map<std::string, glm::mat4>& n2m,
const tinygltf::Scene& scene,
const std::string& nodeString,
const glm::mat4& parentMatrix
)
{
const tinygltf::Node& n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it)
{
traverseNode(n2m, scene, *it, M);
}
}
void set_scene(int index)
{
if(index >= 0 && index < objects.size())
{
dev_primitives = objects[index].dev_primitives;
totalNumPrimitives = objects[index].totalNumPrimitives;
}
}
void copy_object(int index)
{
if (index >= 0 && index < objects.size())
{
//copy over pointer and primitives
ObjectData object_data;
object_data.dev_primitives = dev_primitives;
object_data.totalNumPrimitives = totalNumPrimitives;
object_data.is_copy = true;
objects.push_back(object_data);
}
}
void rasterizeSetBuffers(const tinygltf::Scene& scene)
{
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++)
{
const std::string key = it->first;
const tinygltf::BufferView& bufferView = it->second;
if (bufferView.target == 0)
{
continue; // Unsupported bufferView.
}
const tinygltf::Buffer& buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength,
hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers for the indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it)
{
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode)
{
const tinygltf::Node& N = scene.nodes.at(itNode->first);
const glm::mat4& matrix = itNode->second;
const glm::mat3& matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName)
{
const tinygltf::Mesh& mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(
std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers>& primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++)
{
const tinygltf::Primitive& primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor& indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView& bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> >(
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode)
{
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++)
{
const tinygltf::Accessor& accessor = scene.accessors.at(it->second);
const tinygltf::BufferView& bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR)
{
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2)
{
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3)
{
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4)
{
n = 4;
}
BufferByte* dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type are 5126(FLOAT) here
if (it->first.compare("POSITION") == 0)
{
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0)
{
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0)
{
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> >(
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty())
{
const tinygltf::Material& mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end())
{
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end())
{
const tinygltf::Texture& tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end())
{
const tinygltf::Image& image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> >(
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++)
{
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
//copy over pointer and primitives
ObjectData object_data;
object_data.dev_primitives = dev_primitives;
object_data.totalNumPrimitives = totalNumPrimitives;
objects.push_back(object_data);
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height)
{
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices)
{
//textures
primitive.dev_verticesOut[vid].dev_diffuseTex = 0;
//check if textures exist
if (primitive.dev_diffuseTex)
{
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
}
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
//clip
primitive.dev_verticesOut[vid].pos = MVP * glm::vec4(primitive.dev_position[vid], 1.0f);
//ndc
primitive.dev_verticesOut[vid].pos /= primitive.dev_verticesOut[vid].pos.w;
//screen space
const float width_ndc = static_cast<float>(width) * 0.5f;
const float height_ndc = static_cast<float>(height) * 0.5f;
primitive.dev_verticesOut[vid].pos.x = width_ndc * (primitive.dev_verticesOut[vid].pos.x + 1.0f);
primitive.dev_verticesOut[vid].pos.y = height_ndc * (1.0f - primitive.dev_verticesOut[vid].pos.y);
primitive.dev_verticesOut[vid].pos.z = 0.5f * (1.0f + primitive.dev_verticesOut[vid].pos.z);
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
primitive.dev_verticesOut[vid].eyeNor = MV_normal * primitive.dev_normal[vid];
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(primitive.dev_verticesOut[vid].eyeNor);
primitive.dev_verticesOut[vid].eyePos = glm::vec3(MV * glm::vec4(primitive.dev_position[vid], 1.0f));
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives,
PrimitiveDevBufPointers primitive)
{
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices)
{
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES)
{
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
__global__ void backface_cull(int totalPrimitives, glm::vec3 camera, Primitive* primitives)
{
int vid = blockIdx.x * blockDim.x + threadIdx.x;
if (vid < totalPrimitives)
{
glm::vec3 triangle_pos[3] =
{
glm::vec3(primitives[vid].v[0].pos),
glm::vec3(primitives[vid].v[1].pos),
glm::vec3(primitives[vid].v[2].pos)
};
glm::vec3 dir_1 = triangle_pos[0] - triangle_pos[1];
glm::vec3 dir_2 = triangle_pos[2] - triangle_pos[1];
glm::vec3 triangle_normal = glm::cross(dir_1, dir_2);
primitives[vid].backface_culled = false;
if(glm::dot(camera, triangle_normal) < 0.0f)
{
primitives[vid].backface_culled = true;
}
}
}
//stream compaction for backface culling
struct HostDeviceSteamCompactionCallback {
__host__ __device__ bool operator()(const Primitive &p) {
return !p.backface_culled;
};
};
__global__ void rasterize_triangles(int totalPrimitives, int width, int height, int* depths,
Primitive* primitives, Fragment* fragments)
{
int vid = blockIdx.x * blockDim.x + threadIdx.x;
if (vid < totalPrimitives)
{
glm::vec3 triangle_pos[3] =
{
glm::vec3(primitives[vid].v[0].pos),
glm::vec3(primitives[vid].v[1].pos),
glm::vec3(primitives[vid].v[2].pos)
};
glm::vec2 triangle_texcoords[3] =
{
primitives[vid].v[0].texcoord0,
primitives[vid].v[1].texcoord0,
primitives[vid].v[2].texcoord0,
};
int texture_width = primitives[vid].v[0].texWidth;
int texture_height = primitives[vid].v[0].texHeight;
//for correct color interpolation
glm::vec3 triangle_colors[3] =
{
glm::vec3(1.0f, 0.0f, 0.0f),
glm::vec3(0.0f, 1.0f, 0.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
};
glm::vec3 eye_pos[3] =
{
primitives[vid].v[0].eyePos,
primitives[vid].v[1].eyePos,
primitives[vid].v[2].eyePos,
};
glm::vec3 eye_normal[3] =
{
primitives[vid].v[0].eyeNor,
primitives[vid].v[1].eyeNor,
primitives[vid].v[2].eyeNor,
};
//get aabb
AABB triangle_aabb = getAABBForTriangle(triangle_pos);
//clamp between screen size
triangle_aabb = [width, height](int min_x, int max_x, int min_y, int max_y)
{
AABB result{};
result.min.x = glm::clamp(min_x, 0, width - 1);
result.max.x = glm::clamp(max_x, 0, width - 1);
result.min.y = glm::clamp(min_y, 0, height - 1);
result.max.y = glm::clamp(max_y, 0, height - 1);
return result;
}(triangle_aabb.min.x, triangle_aabb.max.x,
triangle_aabb.min.y, triangle_aabb.max.y);
//scanline using barycentric coordinates
for (int x = triangle_aabb.min.x; x <= triangle_aabb.max.x; x++)
{
for (int y = triangle_aabb.min.y; y <= triangle_aabb.max.y; y++)
{
//calculate barycentric coordinates (check if the pixel lies inside the triangle)
const glm::vec2 pixel_space{x, y};
const glm::vec3 barycentric_coordinate = calculateBarycentricCoordinate(triangle_pos, pixel_space);
if(isBarycentricCoordInBounds(barycentric_coordinate))
{
float depth = -getZAtCoordinate(barycentric_coordinate, triangle_pos);
int depth_in_int = static_cast<int>(depth * 1000.0f);
int pixel = y * width + x;
//depth test (get the pixel closest)
const int old_depth = atomicMin(&depths[pixel], depth_in_int);
//fragment shading
//check if depth was closer (draw pixel on top)
if(old_depth != depths[pixel])
{
float eye_pos1_z = eye_pos[0].z;
float eye_pos2_z = eye_pos[1].z;
float eye_pos3_z = eye_pos[2].z;
float bary_correct_x = barycentric_coordinate.x / eye_pos1_z;
float bary_correct_y = barycentric_coordinate.y / eye_pos2_z;
float bary_correct_z = barycentric_coordinate.z / eye_pos3_z;
float perspective_correct_z = 1.0f / (bary_correct_x + bary_correct_y + bary_correct_z);
//debugging depth
//fragments[pixel].color = glm::vec3(depth);
//normals
//fragments[pixel].color = ;
//perspective correct normal
const glm::vec3 perspective_correct_eye_normal =
(
barycentric_coordinate.x * (eye_normal[0] / eye_pos1_z) +
barycentric_coordinate.y * (eye_normal[1] / eye_pos2_z) +
barycentric_coordinate.z * (eye_normal[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].eyeNor = perspective_correct_eye_normal;
//textures
//perspective correct texture coordinate
const glm::vec2 perspective_correct_texcoord =
(
barycentric_coordinate.x * (triangle_texcoords[0] / eye_pos1_z) +
barycentric_coordinate.y * (triangle_texcoords[1] / eye_pos2_z) +
barycentric_coordinate.z * (triangle_texcoords[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].texcoord0 = perspective_correct_texcoord;
TextureData* diffuse_texture = primitives[vid].v->dev_diffuseTex;
fragments[pixel].dev_diffuseTex = diffuse_texture;
if(diffuse_texture)
{
auto sample_texture = [&](int u, int v)
{
int v_height = v * texture_width;
int u_v_index = 3 * (u + v_height);
glm::vec3 texture_color =
{
diffuse_texture[u_v_index],
diffuse_texture[u_v_index + 1],
diffuse_texture[u_v_index + 2]
};
//put in range 0 -> 1
texture_color /= 255.0f;
return texture_color;
};
//bilinear
#ifdef BILINEAR_FILTERING
float u_float = static_cast<float>(texture_width) * perspective_correct_texcoord[0];
float v_float = static_cast<float>(texture_height) * perspective_correct_texcoord[1];
//4 points
int u_int = static_cast<int>(glm::floor(u_float));
int v_int = static_cast<int>(glm::floor(v_float));
int u_int_plus_one = glm::clamp(u_int + 1, 0, texture_width - 1);
int v_int_plus_one = glm::clamp(v_int + 1, 0, texture_height - 1);
//calculate difference (will be used in mixing)
float u_diff = u_float - static_cast<float>(u_int);
float v_diff = v_float - static_cast<float>(v_int);
//sample 4 points (bilinear mix between them)
const auto sample_mix_1 = glm::mix(sample_texture(u_int, v_int), sample_texture(u_int, v_int_plus_one), v_diff);
const auto sample_mix_2 = glm::mix(sample_texture(u_int_plus_one, v_int), sample_texture(u_int_plus_one, v_int_plus_one), v_diff);
const auto sample_mix_final = glm::mix(sample_mix_1, sample_mix_2, u_diff);
fragments[pixel].color = sample_mix_final;
#else
//not bilinear
int u = texture_width * perspective_correct_texcoord[0];
int v = texture_height * perspective_correct_texcoord[1];
fragments[pixel].color = sample_texture(u, v);
#endif
}
//force color triangle interpolation (no texture)
else
{
//perspective correct color
const glm::vec3 perspective_correct_color =
(
barycentric_coordinate.x * (triangle_colors[0] / eye_pos1_z) +
barycentric_coordinate.y * (triangle_colors[1] / eye_pos2_z) +
barycentric_coordinate.z * (triangle_colors[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].color = perspective_correct_color;
}
#ifdef COLOR_TRIANGLE_INTERPOLATION
//perspective correct color
const glm::vec3 perspective_correct_color =
(
barycentric_coordinate.x * (triangle_colors[0] / eye_pos1_z) +
barycentric_coordinate.y * (triangle_colors[1] / eye_pos2_z) +
barycentric_coordinate.z * (triangle_colors[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].color = perspective_correct_color;
#endif
}
}
}
}
}
}
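// ---------------------------------------------------------------------------
// Explanatory note (added, not part of the original skeleton): the kernel
// above uses perspective-correct interpolation. For a vertex attribute a_i
// with barycentric weights b_i and eye-space depths z_i it computes
//
//   a = (b0*a0/z0 + b1*a1/z1 + b2*a2/z2) / (b0/z0 + b1/z1 + b2/z2)
//
// where the denominator is the reciprocal of perspective_correct_z. The same
// formula is applied to normals, texture coordinates and vertex colors.
// ---------------------------------------------------------------------------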
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
/**
* Perform rasterization.
*/
void rasterize(uchar4* pbo, const glm::mat4& MVP, const glm::mat4& MV, const glm::mat3 MV_normal, glm::vec3& camera_pos)
{
blockCount2d = dim3((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it)
{
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p)
{
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(
p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
hipDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
const int blockSize1d = 128;
int remaining_primitives = totalNumPrimitives;
dim3 num_triangles((remaining_primitives + blockSize1d - 1) / blockSize1d);
//backface culling
#ifdef BACKFACE_CULLING
hipLaunchKernelGGL(( backface_cull), dim3(num_triangles), dim3(blockSize1d), 0, 0, remaining_primitives, camera_pos, dev_primitives);
//stream compact away backface-culled triangles using thrust
thrust::device_ptr<Primitive> dev_primitive_ptr_start = thrust::device_pointer_cast(dev_primitives);
thrust::device_ptr<Primitive> dev_primitive_ptr_end = thrust::device_pointer_cast(
dev_primitives + remaining_primitives);
//perform stream compaction
thrust::device_ptr<Primitive> new_dev_primitive_end = thrust::partition(
dev_primitive_ptr_start, dev_primitive_ptr_end, HostDeviceSteamCompactionCallback());
Primitive* dev_primitive_end = thrust::raw_pointer_cast(new_dev_primitive_end);
//update the primitive counts
remaining_primitives = dev_primitive_end - dev_primitives;
#endif
// TODO: rasterize
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
hipLaunchKernelGGL(( rasterize_triangles), dim3(num_triangles), dim3(blockSize1d), 0, 0, remaining_primitives, width, height, dev_depth, dev_primitives, dev_fragmentBuffer);
hipDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
// Copy depthbuffer colors into framebuffer
glm::vec3 camera_pos_in_MV = glm::vec3(MV * glm::vec4(camera_pos, 1.0f));
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer, dev_lights, num_lights, camera_pos_in_MV);
hipDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
checkCUDAError("fragment shader");
}
void zero_frame_buffer()
{
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
}
void write_to_pbo(uchar4* pbo)
{
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree()
{
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it)
{
for (auto p = it->second.begin(); p != it->second.end(); ++p)
{
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
// hipFree(dev_primitives);
// dev_primitives = NULL;
for(auto& object : objects)
{
hipFree(object.dev_primitives);
}
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
hipFree(dev_lights);
dev_lights = NULL;
checkCUDAError("rasterize Free");
}
|
8f6af2bfadd89954d957de9c107995605b299cfe.cu
|
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <thrust/device_ptr.h>
#include <thrust/partition.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#define LAMBERT_SHADING 1
#define BLINN_PHONG_SHADING 1
#define BACKFACE_CULLING 1
#define BILINEAR_FILTERING 1
//happens by default now since added check
//#define COLOR_TRIANGLE_INTERPOLATION 1
std::chrono::time_point<std::chrono::high_resolution_clock> clock_now;
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive* dev_primitives = NULL;
static Fragment* dev_fragmentBuffer = NULL;
static glm::vec3* dev_framebuffer = NULL;
static int* dev_depth = NULL; // you might need this buffer when doing depth test
//lights in scene
static glm::vec3* dev_lights = NULL;
const int num_lights = 2;
//array of objects
std::vector<ObjectData> objects;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4* pbo, int w, int h, glm::vec3* image)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment* fragmentBuffer, glm::vec3* framebuffer, glm::vec3* lights, int num_lights, glm::vec3 camera_pos)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
glm::vec3 eye_pos = fragmentBuffer[index].eyePos;
glm::vec3 eye_normal = fragmentBuffer[index].eyeNor;
glm::vec3 pixel_color = fragmentBuffer[index].color;
// TODO: add your fragment shader code here
#ifdef LAMBERT_SHADING
for(int i = 0; i < num_lights; i++)
{
glm::vec3& light_source = lights[i];
glm::vec3 light_direction = glm::normalize(light_source - eye_pos);
float amount_of_light = glm::max(glm::dot(light_direction, eye_normal), 0.0f);
#ifdef BLINN_PHONG_SHADING
glm::vec3 eye_direction = glm::normalize(camera_pos - eye_pos);
glm::vec3 half_direction = glm::normalize(light_direction + eye_direction);
amount_of_light = glm::pow(glm::max(glm::dot(light_direction, half_direction), 0.0f), 8.0f);
#endif
pixel_color += fragmentBuffer[index].color * amount_of_light;
}
#endif
//hack to get multiple objects to work (only write the pixel if it is still black); DOESN'T CHECK THE DEPTH BUFFER...
if(framebuffer[index] == glm::vec3(0.0f))
{
framebuffer[index] = pixel_color;
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h)
{
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
cudaFree(dev_lights);
cudaMalloc(&dev_lights, num_lights * sizeof(glm::vec3));
cudaMemset(dev_lights, 0, num_lights * sizeof(glm::vec3));
//init lights here
glm::vec3 cpu_lights[num_lights] =
{
{ 2.0f, 2.0f, 2.0f },
{ -2.0f, 2.0f, 2.0f },
};
cudaMemcpy(dev_lights, cpu_lights, num_lights * sizeof(glm::vec3), cudaMemcpyHostToDevice);
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int* depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
 * kernel function with support for stride to sometimes replace cudaMemcpy
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset,
int componentTypeByteSize)
{
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N)
{
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++)
{
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
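// ---------------------------------------------------------------------------
// Illustrative sketch (added): a typical launch of _deviceBufferCopy for a
// tightly packed vec3 attribute (e.g. POSITION). The names dev_position,
// dev_bufferView, accessor and numVertices stand in for the variables used in
// rasterizeSetBuffers() below; this only clarifies the parameters.
//
//   int n = 3;                            // components per attribute (vec3)
//   int componentSize = sizeof(float);    // glTF component type 5126 (FLOAT)
//   dim3 threads(128);
//   dim3 blocks((n * numVertices + threads.x - 1) / threads.x);
//   _deviceBufferCopy<<<blocks, threads>>>(
//       n * numVertices,                  // one thread per component
//       (BufferByte*)dev_position,        // destination (tightly packed)
//       dev_bufferView,                   // source bufferView on the device
//       n,
//       accessor.byteStride,              // 0 means tightly packed source
//       accessor.byteOffset,
//       componentSize);
// ---------------------------------------------------------------------------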
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal)
{
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices)
{
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node& n)
{
glm::mat4 curMatrix(1.0);
const std::vector<double>& m = n.matrix;
if (m.size() > 0)
{
// matrix, copy it
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
}
else
{
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0)
{
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0)
{
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0)
{
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode(
std::map<std::string, glm::mat4>& n2m,
const tinygltf::Scene& scene,
const std::string& nodeString,
const glm::mat4& parentMatrix
)
{
const tinygltf::Node& n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it)
{
traverseNode(n2m, scene, *it, M);
}
}
void set_scene(int index)
{
if(index >= 0 && index < objects.size())
{
dev_primitives = objects[index].dev_primitives;
totalNumPrimitives = objects[index].totalNumPrimitives;
}
}
void copy_object(int index)
{
if (index >= 0 && index < objects.size())
{
//copy over pointer and primitives
ObjectData object_data;
object_data.dev_primitives = dev_primitives;
object_data.totalNumPrimitives = totalNumPrimitives;
object_data.is_copy = true;
objects.push_back(object_data);
}
}
void rasterizeSetBuffers(const tinygltf::Scene& scene)
{
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++)
{
const std::string key = it->first;
const tinygltf::BufferView& bufferView = it->second;
if (bufferView.target == 0)
{
continue; // Unsupported bufferView.
}
const tinygltf::Buffer& buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength,
cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it)
{
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode)
{
const tinygltf::Node& N = scene.nodes.at(itNode->first);
const glm::mat4& matrix = itNode->second;
const glm::mat3& matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName)
{
const tinygltf::Mesh& mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(
std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers>& primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++)
{
const tinygltf::Primitive& primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor& indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView& bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> >(
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode)
{
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++)
{
const tinygltf::Accessor& accessor = scene.accessors.at(it->second);
const tinygltf::BufferView& bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR)
{
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2)
{
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3)
{
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4)
{
n = 4;
}
BufferByte* dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0)
{
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0)
{
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0)
{
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> >(
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty())
{
const tinygltf::Material& mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end())
{
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end())
{
const tinygltf::Texture& tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end())
{
const tinygltf::Image& image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> >(
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++)
{
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
//copy over pointer and primitives
ObjectData object_data;
object_data.dev_primitives = dev_primitives;
object_data.totalNumPrimitives = totalNumPrimitives;
objects.push_back(object_data);
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height)
{
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices)
{
//textures
primitive.dev_verticesOut[vid].dev_diffuseTex = 0;
//check if textures exist
if (primitive.dev_diffuseTex)
{
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
}
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
//clip
primitive.dev_verticesOut[vid].pos = MVP * glm::vec4(primitive.dev_position[vid], 1.0f);
//ndc
primitive.dev_verticesOut[vid].pos /= primitive.dev_verticesOut[vid].pos.w;
//screen space
const float width_ndc = static_cast<float>(width) * 0.5f;
const float height_ndc = static_cast<float>(height) * 0.5f;
primitive.dev_verticesOut[vid].pos.x = width_ndc * (primitive.dev_verticesOut[vid].pos.x + 1.0f);
primitive.dev_verticesOut[vid].pos.y = height_ndc * (1.0f - primitive.dev_verticesOut[vid].pos.y);
primitive.dev_verticesOut[vid].pos.z = 0.5f * (1.0f + primitive.dev_verticesOut[vid].pos.z);
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
primitive.dev_verticesOut[vid].eyeNor = MV_normal * primitive.dev_normal[vid];
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(primitive.dev_verticesOut[vid].eyeNor);
primitive.dev_verticesOut[vid].eyePos = glm::vec3(MV * glm::vec4(primitive.dev_position[vid], 1.0f));
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives,
PrimitiveDevBufPointers primitive)
{
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices)
{
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES)
{
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
__global__ void backface_cull(int totalPrimitives, glm::vec3 camera, Primitive* primitives)
{
int vid = blockIdx.x * blockDim.x + threadIdx.x;
if (vid < totalPrimitives)
{
glm::vec3 triangle_pos[3] =
{
glm::vec3(primitives[vid].v[0].pos),
glm::vec3(primitives[vid].v[1].pos),
glm::vec3(primitives[vid].v[2].pos)
};
glm::vec3 dir_1 = triangle_pos[0] - triangle_pos[1];
glm::vec3 dir_2 = triangle_pos[2] - triangle_pos[1];
glm::vec3 triangle_normal = glm::cross(dir_1, dir_2);
primitives[vid].backface_culled = false;
if(glm::dot(camera, triangle_normal) < 0.0f)
{
primitives[vid].backface_culled = true;
}
}
}
//stream compaction for backface culling
struct HostDeviceSteamCompactionCallback {
__host__ __device__ bool operator()(const Primitive &p) {
return !p.backface_culled;
};
};
__global__ void rasterize_triangles(int totalPrimitives, int width, int height, int* depths,
Primitive* primitives, Fragment* fragments)
{
int vid = blockIdx.x * blockDim.x + threadIdx.x;
if (vid < totalPrimitives)
{
glm::vec3 triangle_pos[3] =
{
glm::vec3(primitives[vid].v[0].pos),
glm::vec3(primitives[vid].v[1].pos),
glm::vec3(primitives[vid].v[2].pos)
};
glm::vec2 triangle_texcoords[3] =
{
primitives[vid].v[0].texcoord0,
primitives[vid].v[1].texcoord0,
primitives[vid].v[2].texcoord0,
};
int texture_width = primitives[vid].v[0].texWidth;
int texture_height = primitives[vid].v[0].texHeight;
//for correct color interpolation
glm::vec3 triangle_colors[3] =
{
glm::vec3(1.0f, 0.0f, 0.0f),
glm::vec3(0.0f, 1.0f, 0.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
};
glm::vec3 eye_pos[3] =
{
primitives[vid].v[0].eyePos,
primitives[vid].v[1].eyePos,
primitives[vid].v[2].eyePos,
};
glm::vec3 eye_normal[3] =
{
primitives[vid].v[0].eyeNor,
primitives[vid].v[1].eyeNor,
primitives[vid].v[2].eyeNor,
};
//get aabb
AABB triangle_aabb = getAABBForTriangle(triangle_pos);
//clamp between screen size
triangle_aabb = [width, height](int min_x, int max_x, int min_y, int max_y)
{
AABB result{};
result.min.x = glm::clamp(min_x, 0, width - 1);
result.max.x = glm::clamp(max_x, 0, width - 1);
result.min.y = glm::clamp(min_y, 0, height - 1);
result.max.y = glm::clamp(max_y, 0, height - 1);
return result;
}(triangle_aabb.min.x, triangle_aabb.max.x,
triangle_aabb.min.y, triangle_aabb.max.y);
//scanline using barycentric coordinates
for (int x = triangle_aabb.min.x; x <= triangle_aabb.max.x; x++)
{
for (int y = triangle_aabb.min.y; y <= triangle_aabb.max.y; y++)
{
//calculate barycentric coordinates (check if the pixel lies inside the triangle)
const glm::vec2 pixel_space{x, y};
const glm::vec3 barycentric_coordinate = calculateBarycentricCoordinate(triangle_pos, pixel_space);
if(isBarycentricCoordInBounds(barycentric_coordinate))
{
float depth = -getZAtCoordinate(barycentric_coordinate, triangle_pos);
int depth_in_int = static_cast<int>(depth * 1000.0f);
int pixel = y * width + x;
//depth test (get the pixel closest)
const int old_depth = atomicMin(&depths[pixel], depth_in_int);
//fragment shading
//check if depth was closer (draw pixel on top)
if(old_depth != depths[pixel])
{
float eye_pos1_z = eye_pos[0].z;
float eye_pos2_z = eye_pos[1].z;
float eye_pos3_z = eye_pos[2].z;
float bary_correct_x = barycentric_coordinate.x / eye_pos1_z;
float bary_correct_y = barycentric_coordinate.y / eye_pos2_z;
float bary_correct_z = barycentric_coordinate.z / eye_pos3_z;
float perspective_correct_z = 1.0f / (bary_correct_x + bary_correct_y + bary_correct_z);
//debugging depth
//fragments[pixel].color = glm::vec3(depth);
//normals
//fragments[pixel].color = ;
//perspective correct normal
const glm::vec3 perspective_correct_eye_normal =
(
barycentric_coordinate.x * (eye_normal[0] / eye_pos1_z) +
barycentric_coordinate.y * (eye_normal[1] / eye_pos2_z) +
barycentric_coordinate.z * (eye_normal[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].eyeNor = perspective_correct_eye_normal;
//textures
//perspective correct texture coordinate
const glm::vec2 perspective_correct_texcoord =
(
barycentric_coordinate.x * (triangle_texcoords[0] / eye_pos1_z) +
barycentric_coordinate.y * (triangle_texcoords[1] / eye_pos2_z) +
barycentric_coordinate.z * (triangle_texcoords[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].texcoord0 = perspective_correct_texcoord;
TextureData* diffuse_texture = primitives[vid].v->dev_diffuseTex;
fragments[pixel].dev_diffuseTex = diffuse_texture;
if(diffuse_texture)
{
auto sample_texture = [&](int u, int v)
{
int v_height = v * texture_width;
int u_v_index = 3 * (u + v_height);
glm::vec3 texture_color =
{
diffuse_texture[u_v_index],
diffuse_texture[u_v_index + 1],
diffuse_texture[u_v_index + 2]
};
//put in range 0 -> 1
texture_color /= 255.0f;
return texture_color;
};
//bilinear
#ifdef BILINEAR_FILTERING
float u_float = static_cast<float>(texture_width) * perspective_correct_texcoord[0];
float v_float = static_cast<float>(texture_height) * perspective_correct_texcoord[1];
//4 points
int u_int = static_cast<int>(glm::floor(u_float));
int v_int = static_cast<int>(glm::floor(v_float));
int u_int_plus_one = glm::clamp(u_int + 1, 0, texture_width - 1);
int v_int_plus_one = glm::clamp(v_int + 1, 0, texture_height - 1);
//calculate difference (will be used in mixing)
float u_diff = u_float - static_cast<float>(u_int);
float v_diff = v_float - static_cast<float>(v_int);
//sample 4 points (bilinear mix between them)
const auto sample_mix_1 = glm::mix(sample_texture(u_int, v_int), sample_texture(u_int, v_int_plus_one), v_diff);
const auto sample_mix_2 = glm::mix(sample_texture(u_int_plus_one, v_int), sample_texture(u_int_plus_one, v_int_plus_one), v_diff);
const auto sample_mix_final = glm::mix(sample_mix_1, sample_mix_2, u_diff);
fragments[pixel].color = sample_mix_final;
#else
//not bilinear
int u = texture_width * perspective_correct_texcoord[0];
int v = texture_height * perspective_correct_texcoord[1];
fragments[pixel].color = sample_texture(u, v);
#endif
}
//force color triangle interpolation (no texture)
else
{
//perspective correct color
const glm::vec3 perspective_correct_color =
(
barycentric_coordinate.x * (triangle_colors[0] / eye_pos1_z) +
barycentric_coordinate.y * (triangle_colors[1] / eye_pos2_z) +
barycentric_coordinate.z * (triangle_colors[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].color = perspective_correct_color;
}
#ifdef COLOR_TRIANGLE_INTERPOLATION
//perspective correct color
const glm::vec3 perspective_correct_color =
(
barycentric_coordinate.x * (triangle_colors[0] / eye_pos1_z) +
barycentric_coordinate.y * (triangle_colors[1] / eye_pos2_z) +
barycentric_coordinate.z * (triangle_colors[2] / eye_pos3_z)
) * perspective_correct_z;
fragments[pixel].color = perspective_correct_color;
#endif
}
}
}
}
}
}
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
/**
* Perform rasterization.
*/
void rasterize(uchar4* pbo, const glm::mat4& MVP, const glm::mat4& MV, const glm::mat3 MV_normal, glm::vec3& camera_pos)
{
blockCount2d = dim3((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it)
{
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p)
{
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(
p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
cudaDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
const int blockSize1d = 128;
int remaining_primitives = totalNumPrimitives;
dim3 num_triangles((remaining_primitives + blockSize1d - 1) / blockSize1d);
//backface culling
#ifdef BACKFACE_CULLING
backface_cull<<<num_triangles, blockSize1d>>>(remaining_primitives, camera_pos, dev_primitives);
//stream compact away backface-culled triangles using thrust
thrust::device_ptr<Primitive> dev_primitive_ptr_start = thrust::device_pointer_cast(dev_primitives);
thrust::device_ptr<Primitive> dev_primitive_ptr_end = thrust::device_pointer_cast(
dev_primitives + remaining_primitives);
//perform stream compaction
thrust::device_ptr<Primitive> new_dev_primitive_end = thrust::partition(
dev_primitive_ptr_start, dev_primitive_ptr_end, HostDeviceSteamCompactionCallback());
Primitive* dev_primitive_end = thrust::raw_pointer_cast(new_dev_primitive_end);
//update the primitive counts
remaining_primitives = dev_primitive_end - dev_primitives;
#endif
// TODO: rasterize
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
rasterize_triangles<<<num_triangles, blockSize1d>>>(remaining_primitives, width, height, dev_depth, dev_primitives, dev_fragmentBuffer);
cudaDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
// Copy depthbuffer colors into framebuffer
glm::vec3 camera_pos_in_MV = glm::vec3(MV * glm::vec4(camera_pos, 1.0f));
#ifdef PRINT_CLOCK
clock_now = std::chrono::high_resolution_clock::now();
#endif
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer, dev_lights, num_lights, camera_pos_in_MV);
cudaDeviceSynchronize();
#ifdef PRINT_CLOCK
std::cout << std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - clock_now).count() << std::endl;
#endif
checkCUDAError("fragment shader");
}
void zero_frame_buffer()
{
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
}
void write_to_pbo(uchar4* pbo)
{
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree()
{
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it)
{
for (auto p = it->second.begin(); p != it->second.end(); ++p)
{
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
// cudaFree(dev_primitives);
// dev_primitives = NULL;
for(auto& object : objects)
{
cudaFree(object.dev_primitives);
}
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
cudaFree(dev_lights);
dev_lights = NULL;
checkCUDAError("rasterize Free");
}
|
764e7feecc9ba0bec95ed08dcfa961aad1471d50.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/device_vector.h>
struct DispatcherTest : public cudf::test::BaseFixture {
};
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {
};
TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected>
struct type_tester {
template <typename Dispatched>
bool operator()()
{
return std::is_same<Expected, Dispatched>::value;
}
};
} // namespace
TYPED_TEST(TypedDispatcherTest, TypeToId)
{
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()},
type_tester<TypeParam>{}));
}
namespace {
struct verify_dispatched_type {
template <typename T>
__host__ __device__ bool operator()(cudf::type_id id)
{
return id == cudf::type_to_id<T>();
}
};
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result)
{
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id);
}
} // namespace
TYPED_TEST(TypedDispatcherTest, DeviceDispatch)
{
thrust::device_vector<bool> result(1, false);
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, cudf::type_to_id<TypeParam>(), result.data().get());
CUDA_TRY(hipDeviceSynchronize());
EXPECT_EQ(true, result[0]);
}
struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDispatcherTest, IdToType)
{
auto t = GetParam();
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t));
}
CUDF_TEST_PROGRAM_MAIN()
|
764e7feecc9ba0bec95ed08dcfa961aad1471d50.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/device_vector.h>
struct DispatcherTest : public cudf::test::BaseFixture {
};
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {
};
TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected>
struct type_tester {
template <typename Dispatched>
bool operator()()
{
return std::is_same<Expected, Dispatched>::value;
}
};
} // namespace
TYPED_TEST(TypedDispatcherTest, TypeToId)
{
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()},
type_tester<TypeParam>{}));
}
namespace {
struct verify_dispatched_type {
template <typename T>
__host__ __device__ bool operator()(cudf::type_id id)
{
return id == cudf::type_to_id<T>();
}
};
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result)
{
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id);
}
} // namespace
TYPED_TEST(TypedDispatcherTest, DeviceDispatch)
{
thrust::device_vector<bool> result(1, false);
dispatch_test_kernel<<<1, 1>>>(cudf::type_to_id<TypeParam>(), result.data().get());
CUDA_TRY(cudaDeviceSynchronize());
EXPECT_EQ(true, result[0]);
}
struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDispatcherTest, IdToType)
{
auto t = GetParam();
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t));
}
CUDF_TEST_PROGRAM_MAIN()
|
7c483918d04ba06103638060c8dc899ee861c03f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2011, Federico Raimondo ([email protected])
* Modified to build under Windows by Yunhui Zhou.
*
* This file is part of Cudaica.
*
* Cudaica is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* Cudaica is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Cudaica. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Modification Log:
 * 1. Memory allocation for "buffer" and "floatbuffer" changed from mapmalloc() to malloc() to fix the issue that "/dev/zero" is not supported on Windows.
* Correspondingly, mapfree() is changed to free().
* 2. Write data to double precision file.
*
* Yunhui Zhou
* 2018/09/03
*/
#include <stdio.h>
#include <config.h>
#include <common.h>
#include <device.h>
#include <error.h>
#include <io.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <string.h>
#define MAX_DIMENSION(a) ( a > 512 ? 512 : a)
/*
* Zeroes a matrix.
* Should dispatch rows blocks of cols threads
*
* data: matrix
* pitch: matrix row size in bytes
*/
__global__ void zeroMatrix(real* data, size_t pitch) {
data[threadIdx.x + blockIdx.x * pitch/sizeof(real)] = 0.0;
}
/*
* Identity matrix
* Should be launched with channels blocks x channels threads
*
* data: matrix
* pitch: matrix row size in bytes
*/
__global__ void eye(real * data, size_t pitch) {
size_t colwidth = pitch/sizeof(real);
real value = (threadIdx.x == blockIdx.x ? 1.0 : 0.0);
data[threadIdx.x + blockIdx.x * colwidth] = value;
}
/*
* Memory mappings functions
*/
#include <mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stddef.h>
#include <errno.h>
void *mapmalloc(size_t size) {
void * base;
#ifdef DARWIN
base = malloc(size);
#else
int fd;
fd = open("/dev/zero",O_RDWR);
base = mmap(NULL,(size_t)(size),PROT_READ|PROT_WRITE,MAP_PRIVATE,fd,0);
if (base == MAP_FAILED) {
printf("Oh dear, something went wrong with mmap()! %s\n", strerror(errno));
}
close(fd);
#endif
return base;
}
void mapfree(void *addr, size_t size) {
#ifdef DARWIN
free(addr);
#else
munmap(addr,size);
#endif
}
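/*
 * Illustrative sketch (added note): mapmalloc()/mapfree() are drop-in
 * replacements for malloc()/free() backed by an anonymous /dev/zero mapping.
 * A hypothetical host-side use, assuming rows and cols are known:
 *
 *   real* buffer = (real*)mapmalloc(rows * cols * sizeof(real));
 *   // ... fill buffer or copy data into it ...
 *   mapfree(buffer, rows * cols * sizeof(real));
 *
 * Unlike free(), mapfree() needs the original allocation size so that
 * munmap() can unmap the whole region.
 */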
/*
*
 * Read a total of rows*cols floating point values from the file specified by
* fname to the device memory.
*
* fname: file name to be read
 * rows, cols: matrix dimensions (rows*cols values are read)
* mat: output (should have enough size)
*/
void dev_matread(char *fname, int rows, int cols, real *mat, size_t pitch) {
FILE *file = fopen(fname,"rb");
real *buffer;
float *floatbuffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
//floatbuffer = (float*)mapmalloc(size*sizeof(float));
//buffer = (real*)mapmalloc(size*sizeof(real));
floatbuffer = (float*)malloc(size * sizeof(float));
buffer = (real*)malloc(size * sizeof(real));
items = (int)fread(floatbuffer,sizeof(float),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
for (int i = 0; i < size; i++){
buffer[i] = (real) floatbuffer[i];
}
HANDLE_ERROR(hipMemcpy2D(mat, pitch, buffer, cols*sizeof(real), cols*sizeof(real), rows, hipMemcpyHostToDevice));
//mapfree(buffer,size*sizeof(real));
//mapfree(floatbuffer,size*sizeof(float));
free(buffer);
free(floatbuffer);
fclose(file);
}
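/*
 * Illustrative sketch (added note, not part of the original code): dev_matread
 * expects a pitched device allocation. A hypothetical caller, using this
 * project's real typedef and HANDLE_ERROR macro, might look like:
 *
 *   real* dev_weights;
 *   size_t pitch;
 *   HANDLE_ERROR(hipMallocPitch((void**)&dev_weights, &pitch,
 *                               cols * sizeof(real), rows));
 *   dev_matread("weights.fdt", rows, cols, dev_weights, pitch);
 *
 * "weights.fdt" is a made-up file name; the file must contain rows*cols
 * single-precision floats, which are widened to real on the host before the
 * hipMemcpy2D above.
 */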
/*
*
 * Read a total of rows*cols integer values from the file specified by fname to the device memory.
*
* fname: file name to be read
 * rows, cols: matrix dimensions (rows*cols values are read)
* mat: output (should have enough size)
*/
void dev_matreadInt(char *fname, int rows, int cols, int *mat, size_t pitch) {
FILE *file = fopen(fname,"rb");
int *buffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
//buffer = (int*)mapmalloc(size*sizeof(int));
buffer = (int*)malloc(size * sizeof(int));
items = (int)fread(buffer,sizeof(int),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
HANDLE_ERROR(hipMemcpy2D(mat, pitch, buffer, cols*sizeof(int), cols*sizeof(int), rows, hipMemcpyHostToDevice));
//mapfree(buffer,size*sizeof(int));
free(buffer);
fclose(file);
}
/*
 * Write a total of rows*cols floating point values from the matrix in device memory to the
* file.
*
* fname: file name to be written to
* rows: number of rows in the matrix
 * cols: number of columns in the matrix
* mat: matrix
* pitch: matrix row size in bytes
*/
void dev_matwrite(char *fname, int rows, int cols, real *mat, size_t pitch) {
FILE *file = fopen(fname,"wb");
real *buffer;
//float *floatbuffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
// buffer = (real*)mapmalloc(size*sizeof(real)); // This line cause error
buffer = (real*)malloc(size * sizeof(real));
DPRINTF(2, "Copying %d by %d cols from %p to %p\n", rows, cols, mat, buffer);
HANDLE_ERROR(hipMemcpy2D(buffer, cols*sizeof(real), mat, pitch, cols*sizeof(real), rows, hipMemcpyDeviceToHost));
//floatbuffer = (float*)mapmalloc(size*sizeof(float));
//floatbuffer = (float*)malloc(size * sizeof(float));
//for (int i = 0; i < size; i++){
// floatbuffer[i] = (float) buffer[i];
//}
//items = (int)fwrite(floatbuffer,sizeof(float),size,file);
items = (int)fwrite(buffer, sizeof(real), size, file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
//mapfree(floatbuffer, size*sizeof(float));
//mapfree(buffer,size*sizeof(real));
//free(floatbuffer);
free(buffer);
//floatbuffer = NULL;
buffer = NULL;
fclose(file);
}
/*
 * Write a total of rows*cols integer values from the matrix in device memory to the
* file.
*
* fname: file name to be written to
* rows: number of rows in the matrix
 * cols: number of columns in the matrix
* mat: matrix
* pitch: matrix row size in bytes
*/
void dev_matwriteInt(char *fname, int rows, int cols, int *mat, size_t pitch) {
FILE *file = fopen(fname,"wb");
int *buffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
// buffer = (int*)mapmalloc(size*sizeof(int));
buffer = (int*)malloc(size * sizeof(int));
HANDLE_ERROR(hipMemcpy2D(buffer, cols*sizeof(int), mat, pitch, cols*sizeof(int), rows, hipMemcpyDeviceToHost));
items = (int)fwrite(buffer,sizeof(int),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
//mapfree(buffer,size*sizeof(int));
free(buffer);
fclose(file);
}
/*
 * Write a total of rows*cols unsigned integer values from the matrix in device memory to the
* file.
*
* fname: file name to be written to
* rows: number of rows in the matrix
 * cols: number of columns in the matrix
* mat: matrix
* pitch: matrix row size in bytes
*/
void dev_matwriteNat(char *fname, int rows, int cols, natural *mat, size_t pitch) {
FILE *file = fopen(fname,"wb");
natural *buffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
//buffer = (natural*)mapmalloc(size*sizeof(natural));
buffer = (natural*)malloc(size * sizeof(natural));
HANDLE_ERROR(hipMemcpy2D(buffer, cols*sizeof(natural), mat, pitch, cols*sizeof(natural), rows, hipMemcpyDeviceToHost));
items = (natural)fwrite(buffer,sizeof(natural),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
//mapfree(buffer,size*sizeof(int));
free(buffer);
fclose(file);
}
void printVector(real* data, natural size) {
int j = 0;
for (j = 0; j < size; j++) {
printf("%d = %f\n", j, data[j]);
}
}
void dev_printVector(real* data, natural size, natural max) {
real* host = (real*) malloc(size*sizeof(real));
HANDLE_ERROR(hipMemcpy(host, data, size*sizeof(real), hipMemcpyDeviceToHost));
printVector(host, max);
free(host);
}
/*
* Calculates greatest common divisor between u and v
*/
unsigned int gcd(natural u, natural v) {
int shift;
if (u == 0 || v == 0)
return u | v;
for (shift = 0; ((u | v) & 1) == 0; ++shift) {
u >>= 1;
v >>= 1;
}
while ((u & 1) == 0)
u >>= 1;
do {
while ((v & 1) == 0)
v >>= 1;
if (u < v) {
v -= u;
} else {
natural diff = u - v;
u = v;
v = diff;
}
v >>= 1;
} while (v != 0);
return u << shift;
}
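/*
 * Editorial note (not in the original sources): the loop above is Stein's
 * binary GCD. Common factors of two are collected in `shift`, remaining
 * factors of two are stripped from u, and the subtraction step
 * gcd(u, v) = gcd(min(u, v), |u - v|) is repeated until v reaches zero.
 * Worked example: gcd(48, 36) -> shift = 2 (both divisible by 4), then the
 * reduced problem gcd(3, 9) -> gcd(3, 3) -> gcd(3, 0) = 3, so the result is
 * 3 << 2 = 12.
 */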
/* -- translated by f2c (version 19940927).
You must link the resulting object file with the libraries:
-lf2c -lm (in that order)
*/
real dsum_(integer *n, real *dx, integer *incx) {
/* System generated locals */
//~ integer i__1, i__2;
real ret_val, d__1, d__2, d__3, d__4, d__5, d__6;
/* Local variables */
static integer i, m;
static real dtemp;
static integer nincx, mp1;
#define DX(I) dx[(I)-1]
ret_val = 0.;
dtemp = 0.;
if (*n <= 0 || *incx <= 0) {
return ret_val;
}
if (*incx == 1) {
goto L20;
}
/* code for increment not equal to 1 */
nincx = *n * *incx;
for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) {
dtemp += (d__1 = DX(i), d__1);
}
ret_val = dtemp;
return ret_val;
L20:
m = *n % 6;
if (m == 0) {
goto L40;
}
for (i = 1; i <= m; ++i) {
dtemp += (d__1 = DX(i), d__1);
}
if (*n < 6) {
goto L60;
}
L40:
mp1 = m + 1;
for (i = mp1; i <= *n; i += 6) {
dtemp = dtemp + (d__1 = DX(i), d__1) + (d__2 = DX(i + 1),
d__2) + (d__3 = DX(i + 2), d__3) + (d__4 = DX(i + 3),
d__4) + (d__5 = DX(i + 4), d__5) + (d__6 = DX(i + 5)
, d__6);
}
L60:
ret_val = dtemp;
return ret_val;
} /* dsum_ */
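/*
 * Editorial note (not in the original f2c output): dsum_ above is BLAS dasum
 * with the fabs() calls removed, i.e. it returns the plain signed sum of n
 * strided elements; labels L20/L40/L60 implement 6-way unrolling for the
 * unit-stride path. A minimal reference version for the common incx > 0 case,
 * added here only to document the intent:
 */
static inline real dsum_reference(integer n, const real *dx, integer incx) {
    real s = 0.;
    for (integer i = 0; i < n; ++i)
        s += dx[i * incx];   /* no fabs(): signed values may cancel */
    return s;
}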
|
7c483918d04ba06103638060c8dc899ee861c03f.cu
|
/*
* Copyright (C) 2011, Federico Raimondo ([email protected])
* Modified to build under Windows by Yunhui Zhou.
*
* This file is part of Cudaica.
*
* Cudaica is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* Cudaica is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Cudaica. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Modification Log:
* 1. Memory allocation for "buffer" and "floatbuffer" was changed from mapmalloc() to malloc() to fix the issue of "/dev/zero" not being supported on Windows.
* Correspondingly, mapfree() is changed to free().
* 2. Write data to double precision file.
*
* Yunhui Zhou
* 2018/09/03
*/
#include <stdio.h>
#include <config.h>
#include <common.h>
#include <device.h>
#include <error.h>
#include <io.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <string.h>
#define MAX_DIMENSION(a) ( a > 512 ? 512 : a)
/*
* Zeroes a matrix.
* Should dispatch rows blocks of cols threads
*
* data: matrix
* pitch: matrix row size in bytes
*/
__global__ void zeroMatrix(real* data, size_t pitch) {
data[threadIdx.x + blockIdx.x * pitch/sizeof(real)] = 0.0;
}
/*
* Identity matrix
* Should be launched with channels blocks x channels threads
*
* data: matrix
* pitch: matrix row size in bytes
*/
__global__ void eye(real * data, size_t pitch) {
size_t colwidth = pitch/sizeof(real);
real value = (threadIdx.x == blockIdx.x ? 1.0 : 0.0);
data[threadIdx.x + blockIdx.x * colwidth] = value;
}
/*
* Memory mappings functions
*/
#include <mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stddef.h>
#include <errno.h>
void *mapmalloc(size_t size) {
void * base;
#ifdef DARWIN
base = malloc(size);
#else
int fd;
fd = open("/dev/zero",O_RDWR);
base = mmap(NULL,(size_t)(size),PROT_READ|PROT_WRITE,MAP_PRIVATE,fd,0);
if (base == MAP_FAILED) {
printf("Oh dear, something went wrong with mmap()! %s\n", strerror(errno));
}
close(fd);
#endif
return base;
}
void mapfree(void *addr, size_t size) {
#ifdef DARWIN
free(addr);
#else
munmap(addr,size);
#endif
}
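/*
 * Editorial note: on POSIX systems mapmalloc() obtains zero-initialised,
 * demand-paged memory by mmap()-ing /dev/zero with MAP_PRIVATE, and
 * mapfree() releases it with munmap(). Windows has no /dev/zero, which is
 * why the I/O routines in this file fall back to plain malloc()/free()
 * (see the modification log at the top of the file).
 */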
/*
*
* Read a total of size floating point values from the file specified by
* fname to the device memory.
*
* fname: file name to be read
* size: number of bytes in the file
* mat: output (should have enough size)
*/
void dev_matread(char *fname, int rows, int cols, real *mat, size_t pitch) {
FILE *file = fopen(fname,"rb");
real *buffer;
float *floatbuffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
//floatbuffer = (float*)mapmalloc(size*sizeof(float));
//buffer = (real*)mapmalloc(size*sizeof(real));
floatbuffer = (float*)malloc(size * sizeof(float));
buffer = (real*)malloc(size * sizeof(real));
items = (int)fread(floatbuffer,sizeof(float),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
for (int i = 0; i < size; i++){
buffer[i] = (real) floatbuffer[i];
}
HANDLE_ERROR(cudaMemcpy2D(mat, pitch, buffer, cols*sizeof(real), cols*sizeof(real), rows, cudaMemcpyHostToDevice));
//mapfree(buffer,size*sizeof(real));
//mapfree(floatbuffer,size*sizeof(float));
free(buffer);
free(floatbuffer);
fclose(file);
}
/*
*
* Read a total of size integer values from the file specified by fname to the device memory.
*
* fname: file name to be read
* size: number of bytes in the file
* mat: output (should have enough size)
*/
void dev_matreadInt(char *fname, int rows, int cols, int *mat, size_t pitch) {
FILE *file = fopen(fname,"rb");
int *buffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
//buffer = (int*)mapmalloc(size*sizeof(int));
buffer = (int*)malloc(size * sizeof(int));
items = (int)fread(buffer,sizeof(int),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
HANDLE_ERROR(cudaMemcpy2D(mat, pitch, buffer, cols*sizeof(int), cols*sizeof(int), rows, cudaMemcpyHostToDevice));
//mapfree(buffer,size*sizeof(int));
free(buffer);
fclose(file);
}
/*
* Write a total of size floating point values from the matrix in device memory to the
* file.
*
* fname: file name to be written to
* rows: number of rows in the matrix
* cols: number of columns in the matrix
* mat: matrix
* pitch: matrix row size in bytes
*/
void dev_matwrite(char *fname, int rows, int cols, real *mat, size_t pitch) {
FILE *file = fopen(fname,"wb");
real *buffer;
//float *floatbuffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
// buffer = (real*)mapmalloc(size*sizeof(real)); // This line causes an error
buffer = (real*)malloc(size * sizeof(real));
DPRINTF(2, "Copying %d by %d cols from %p to %p\n", rows, cols, mat, buffer);
HANDLE_ERROR(cudaMemcpy2D(buffer, cols*sizeof(real), mat, pitch, cols*sizeof(real), rows, cudaMemcpyDeviceToHost));
//floatbuffer = (float*)mapmalloc(size*sizeof(float));
//floatbuffer = (float*)malloc(size * sizeof(float));
//for (int i = 0; i < size; i++){
// floatbuffer[i] = (float) buffer[i];
//}
//items = (int)fwrite(floatbuffer,sizeof(float),size,file);
items = (int)fwrite(buffer, sizeof(real), size, file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
//mapfree(floatbuffer, size*sizeof(float));
//mapfree(buffer,size*sizeof(real));
//free(floatbuffer);
free(buffer);
//floatbuffer = NULL;
buffer = NULL;
fclose(file);
}
/*
* Write a total of size integer values from matrix in the device memory to the
* file.
*
* fname: file name to be written to
* rows: number of rows in the matrix
* cols: number of columns in the matrix
* mat: matrix
* pitch: matrix row size in bytes
*/
void dev_matwriteInt(char *fname, int rows, int cols, int *mat, size_t pitch) {
FILE *file = fopen(fname,"wb");
int *buffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
// buffer = (int*)mapmalloc(size*sizeof(int));
buffer = (int*)malloc(size * sizeof(int));
HANDLE_ERROR(cudaMemcpy2D(buffer, cols*sizeof(int), mat, pitch, cols*sizeof(int), rows, cudaMemcpyDeviceToHost));
items = (int)fwrite(buffer,sizeof(int),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
//mapfree(buffer,size*sizeof(int));
free(buffer);
fclose(file);
}
/*
* Write a total of size unsigned integer values from matrix in the device memory to the
* file.
*
* fname: file name to be written to
* rows: number of rows in the matrix
* cols: number of columns in the matrix
* mat: matrix
* pitch: matrix row size in bytes
*/
void dev_matwriteNat(char *fname, int rows, int cols, natural *mat, size_t pitch) {
FILE *file = fopen(fname,"wb");
natural *buffer;
int items;
int size = rows * cols;
if (!file) {
printf("open failed\n");
exit (0);
}
//buffer = (natural*)mapmalloc(size*sizeof(natural));
buffer = (natural*)malloc(size * sizeof(natural));
HANDLE_ERROR(cudaMemcpy2D(buffer, cols*sizeof(natural), mat, pitch, cols*sizeof(natural), rows, cudaMemcpyDeviceToHost));
items = (natural)fwrite(buffer,sizeof(natural),size,file);
if (items != size) {
printf("invalid number of elements\n");
exit (0);
}
//mapfree(buffer,size*sizeof(int));
free(buffer);
fclose(file);
}
void printVector(real* data, natural size) {
int j = 0;
for (j = 0; j < size; j++) {
printf("%d = %f\n", j, data[j]);
}
}
void dev_printVector(real* data, natural size, natural max) {
real* host = (real*) malloc(size*sizeof(real));
HANDLE_ERROR(cudaMemcpy(host, data, size*sizeof(real), cudaMemcpyDeviceToHost));
printVector(host, max);
free(host);
}
/*
* Calculates greatest common divisor between u and v
*/
unsigned int gcd(natural u, natural v) {
int shift;
if (u == 0 || v == 0)
return u | v;
for (shift = 0; ((u | v) & 1) == 0; ++shift) {
u >>= 1;
v >>= 1;
}
while ((u & 1) == 0)
u >>= 1;
do {
while ((v & 1) == 0)
v >>= 1;
if (u < v) {
v -= u;
} else {
natural diff = u - v;
u = v;
v = diff;
}
v >>= 1;
} while (v != 0);
return u << shift;
}
/* -- translated by f2c (version 19940927).
You must link the resulting object file with the libraries:
-lf2c -lm (in that order)
*/
real dsum_(integer *n, real *dx, integer *incx) {
/* System generated locals */
//~ integer i__1, i__2;
real ret_val, d__1, d__2, d__3, d__4, d__5, d__6;
/* Local variables */
static integer i, m;
static real dtemp;
static integer nincx, mp1;
#define DX(I) dx[(I)-1]
ret_val = 0.;
dtemp = 0.;
if (*n <= 0 || *incx <= 0) {
return ret_val;
}
if (*incx == 1) {
goto L20;
}
/* code for increment not equal to 1 */
nincx = *n * *incx;
for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) {
dtemp += (d__1 = DX(i), d__1);
}
ret_val = dtemp;
return ret_val;
L20:
m = *n % 6;
if (m == 0) {
goto L40;
}
for (i = 1; i <= m; ++i) {
dtemp += (d__1 = DX(i), d__1);
}
if (*n < 6) {
goto L60;
}
L40:
mp1 = m + 1;
for (i = mp1; i <= *n; i += 6) {
dtemp = dtemp + (d__1 = DX(i), d__1) + (d__2 = DX(i + 1),
d__2) + (d__3 = DX(i + 2), d__3) + (d__4 = DX(i + 3),
d__4) + (d__5 = DX(i + 4), d__5) + (d__6 = DX(i + 5)
, d__6);
}
L60:
ret_val = dtemp;
return ret_val;
} /* dsum_ */
|
0c66f4685aca8b0a79316b97b811d8128d40199c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include"_reg_resampling.h"
#include"_reg_maths.h"
#include "resampleKernel.h"
#include "_reg_common_cuda.h"
#include"_reg_tools.h"
#include "interpolations.h"
#define SINC_KERNEL_RADIUS 3
#define SINC_KERNEL_SIZE SINC_KERNEL_RADIUS*2
/* *************************************************************** */
unsigned int min1(unsigned int a, unsigned int b)
{
return (a < b) ? a : b;
}
/* *************************************************************** */
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(DTYPE const* mat, DTYPE const* in, DTYPE *out)
{
out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]);
out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]);
out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]);
return;
}
/* *************************************************************** */
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]);
out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]);
out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]);
return;
}
/* *************************************************************** */
__device__ __inline__ int cuda_reg_floor(double a)
{
return (int) (floor(a));
}
/* *************************************************************** */
template<class FieldTYPE>
__device__ __inline__ void interpolantCubicSpline(FieldTYPE ratio, FieldTYPE *basis)
{
if (ratio < 0.0)
ratio = 0.0; //reg_rounding error
double FF = (double) ratio * ratio;
basis[0] = (FieldTYPE) ((ratio * (((double)2.0 - ratio) * ratio - (double)1.0)) / (double)2.0);
basis[1] = (FieldTYPE) ((FF * ((double)3.0 * ratio - 5.0) + 2.0) / (double)2.0);
basis[2] = (FieldTYPE) ((ratio * (((double)4.0 - (double)3.0 * ratio) * ratio + (double)1.0)) / (double)2.0);
basis[3] = (FieldTYPE) ((ratio - (double)1.0) * FF / (double)2.0);
}
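/*
 * Editorial note: the four weights above are the Catmull-Rom spline basis
 * evaluated at the fractional offset t = ratio in [0,1):
 *   w0 = (-t^3 + 2t^2 - t) / 2
 *   w1 = ( 3t^3 - 5t^2 + 2) / 2
 *   w2 = (-3t^3 + 4t^2 + t) / 2
 *   w3 = (  t^3 -  t^2    ) / 2
 * The weights sum to 1, so interpolating four neighbouring samples with them
 * reproduces constants exactly. interpCubicSplineKernel below computes the
 * same basis in double precision.
 */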
/* *************************************************************** */
__inline__ __device__ void interpWindowedSincKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; //reg_rounding error
int j = 0;
double sum = 0.;
for (int i = -SINC_KERNEL_RADIUS; i < SINC_KERNEL_RADIUS; ++i) {
double x = relative - (double) (i);
if (x == 0.0)
basis[j] = 1.0;
else if (abs(x) >= (double) (SINC_KERNEL_RADIUS))
basis[j] = 0;
else {
double pi_x = M_PI * x;
basis[j] = (SINC_KERNEL_RADIUS) * sin(pi_x) * sin(pi_x / SINC_KERNEL_RADIUS) / (pi_x * pi_x);
}
sum += basis[j];
j++;
}
for (int i = 0; i < SINC_KERNEL_SIZE; ++i)
basis[i] /= sum;
}
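/*
 * Editorial note: the weights above are the Lanczos-3 kernel
 *   L(x) = sinc(x) * sinc(x / R)
 *        = R * sin(pi x) * sin(pi x / R) / (pi x)^2,   |x| < R, R = 3,
 * evaluated at the six integer offsets surrounding the sample position and
 * then renormalised so the truncated window still sums to exactly 1.
 */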
/* *************************************************************** */
__inline__ __device__ void interpCubicSplineKernel(double relative, double *basis)
{
// if (relative < 0.0)
// relative = 0.0; //reg_rounding error
double FF = relative * relative;
basis[0] = (relative * ((2.0 - relative) * relative - 1.0)) / 2.0;
basis[1] = (FF * (3.0 * relative - 5.0) + 2.0) / 2.0;
basis[2] = (relative * ((4.0 - 3.0 * relative) * relative + 1.0)) / 2.0;
basis[3] = (relative - 1.0) * FF / 2.0;
}
/* *************************************************************** */
__inline__ __device__ void interpLinearKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; //reg_rounding error
basis[1] = relative;
basis[0] = 1.0 - relative;
}
/* *************************************************************** */
__inline__ __device__ void interpNearestNeighKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; //reg_rounding error
basis[0] = basis[1] = 0.0;
if (relative >= 0.5)
basis[1] = 1;
else
basis[0] = 1;
}
/* *************************************************************** */
__inline__ __device__ double interpLoop2D(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
int *previous,
uint3 fi_xyz,
const float paddingValue,
const unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int b = 0; b < kernel_size; b++) {
int Y = previous[1] + b;
bool yInBounds = -1 < Y && Y < fi_xyz.y;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
int X = previous[0] + a;
bool xInBounds = -1 < X && X < fi_xyz.x;
const unsigned int idx = Y * fi_xyz.x + X;
xTempNewValue += (xInBounds && yInBounds) ? floatingIntensity[idx] * xBasis[a] : paddingValue * xBasis[a];
}
intensity += xTempNewValue * yBasis[b];
}
return intensity;
}
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__inline__ __device__ double interpLoop2DBoundary(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
int *previous,
uint3 fi_xyz,
const unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int b = 0; b < kernel_size; b++) {
const int offset_x = reg_applyBoundary<tBoundary>(previous[1] + b, fi_xyz.y)*fi_xyz.x;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
const unsigned int idx = offset_x + reg_applyBoundary<tBoundary>(previous[0] + a, fi_xyz.x);
xTempNewValue += floatingIntensity[idx]*xBasis[a];
}
intensity += xTempNewValue*yBasis[b];
}
return intensity;
}
/* *************************************************************** */
__inline__ __device__ double interpLoop3D(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
const double* zBasis,
int *previous,
uint3 fi_xyz,
float paddingValue,
unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int c = 0; c < kernel_size; c++) {
int Z = previous[2] + c;
bool zInBounds = -1 < Z && Z < fi_xyz.z;
double yTempNewValue = 0.0;
for (int b = 0; b < kernel_size; b++) {
int Y = previous[1] + b;
bool yInBounds = -1 < Y && Y < fi_xyz.y;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
int X = previous[0] + a;
bool xInBounds = -1 < X && X < fi_xyz.x;
const unsigned int idx = Z * fi_xyz.x * fi_xyz.y + Y * fi_xyz.x + X;
xTempNewValue += (xInBounds && yInBounds && zInBounds) ? floatingIntensity[idx] * xBasis[a] : paddingValue * xBasis[a];
}
yTempNewValue += xTempNewValue * yBasis[b];
}
intensity += yTempNewValue * zBasis[c];
}
return intensity;
}
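/*
 * Editorial note: the triple loop above evaluates a separable (tensor-product)
 * interpolation over a kernel_size^3 neighbourhood anchored at `previous`:
 *   I = sum_c zBasis[c] * sum_b yBasis[b] * sum_a xBasis[a]
 *         * F(previous[0]+a, previous[1]+b, previous[2]+c)
 * Taps that fall outside the floating image contribute paddingValue instead,
 * which is how the zero- and NaN-padding boundary modes are realised.
 */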
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__inline__ __device__ double interpLoop3DBoundary(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
const double* zBasis,
int *previous,
uint3 fi_xyz,
unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int c = 0; c < kernel_size; c++) {
const int offset_y = reg_applyBoundary<tBoundary>(previous[2] + c, fi_xyz.z)*fi_xyz.y;
double yTempNewValue = 0.0;
for (int b = 0; b < kernel_size; b++) {
const int offset_x = (offset_y + reg_applyBoundary<tBoundary>(previous[1] + b, fi_xyz.y))*fi_xyz.x;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
const unsigned int idx = offset_x + reg_applyBoundary<tBoundary>(previous[0] + a, fi_xyz.x);
xTempNewValue += floatingIntensity[idx]*xBasis[a];
}
yTempNewValue += xTempNewValue*yBasis[b];
}
intensity += yTempNewValue*zBasis[c];
}
return intensity;
}
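/*
 * Editorial note: interpLoop2DBoundary/interpLoop3DBoundary differ from the
 * padding variants only in how out-of-range neighbours are handled: instead
 * of substituting paddingValue, reg_applyBoundary<tBoundary>() folds the
 * index back into [0, dim) (clamping it to the edge for CLAMPING, mirroring
 * it for REFLECTING), so every tap reads a real voxel of the floating image.
 */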
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__global__ void ResampleImage2D(const float* floatingImage,
const float* deformationField,
float* warpedImage,
ulong2 voxelNumber,
uint3 fi_xyz,
uint2 wi_tu,
const float paddingValue,
const int kernelType)
{
const float *sourceIntensityPtr = (floatingImage);
float *resultIntensityPtr = (warpedImage);
const float *deformationFieldPtrX = (deformationField);
const float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
long index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < voxelNumber.x) {
for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {
float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
const float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
double intensity = paddingValue;
int previous[3];
float position[3];
double relative[3];
auto launchInterpLoop = [&](const double xBasis[], const double yBasis[], const int kernelSize) {
if (resampler_boundary_e(tBoundary) != resampler_boundary_e::ZEROPAD && resampler_boundary_e(tBoundary) != resampler_boundary_e::NANPAD) {
intensity = interpLoop2DBoundary<tBoundary>(floatingIntensity, xBasis, yBasis, previous, fi_xyz, kernelSize);
} else {
intensity = interpLoop2D(floatingIntensity, xBasis, yBasis, previous, fi_xyz, paddingValue, kernelSize);
}
};
position[0] = (float)(deformationFieldPtrX[index]);
position[1] = (float)(deformationFieldPtrY[index]);
previous[0] = cuda_reg_floor(position[0]);
previous[1] = cuda_reg_floor(position[1]);
relative[0] = (double)(position[0]) - (double)(previous[0]);
relative[1] = (double)(position[1]) - (double)(previous[1]);
if (kernelType == 0) {
double xBasisIn[2], yBasisIn[2];
interpNearestNeighKernel(relative[0], xBasisIn);
interpNearestNeighKernel(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 2);
}
else if (kernelType == 1) {
double xBasisIn[2], yBasisIn[2];
interpLinearKernel(relative[0], xBasisIn);
interpLinearKernel(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 2);
}
else if (kernelType == 4) {
double xBasisIn[6], yBasisIn[6];
previous[0] -= SINC_KERNEL_RADIUS;
previous[1] -= SINC_KERNEL_RADIUS;
previous[2] -= SINC_KERNEL_RADIUS;
interpWindowedSincKernel(relative[0], xBasisIn);
interpWindowedSincKernel(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 6);
}
else {
double xBasisIn[4], yBasisIn[4];
previous[0]--;
previous[1]--;
previous[2]--;
reg_getNiftynetCubicSpline(relative[0], xBasisIn);
reg_getNiftynetCubicSpline(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 4);
}
resultIntensity[index] = (float)intensity;
}
index += blockDim.x * gridDim.x;
}
}
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__global__ void ResampleImage3D(const float* floatingImage,
const float* deformationField,
float* warpedImage,
const ulong2 voxelNumber,
uint3 fi_xyz,
uint2 wi_tu,
const float paddingValue,
int kernelType)
{
const float *sourceIntensityPtr = (floatingImage);
float *resultIntensityPtr = (warpedImage);
const float *deformationFieldPtrX = (deformationField);
const float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
const float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber.x];
long index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < voxelNumber.x) {
for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {
float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
const float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
double intensity = paddingValue;
int previous[3];
float position[3];
double relative[3];
auto launchInterpLoop = [&](const double xBasisIn[], const double yBasisIn[], const double zBasisIn[], const int kernelSize) {
if (resampler_boundary_e(tBoundary) != resampler_boundary_e::ZEROPAD && resampler_boundary_e(tBoundary) != resampler_boundary_e::NANPAD) {
intensity = interpLoop3DBoundary<tBoundary>(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, kernelSize);
} else {
intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, kernelSize);
}
};
position[0] = (float) (deformationFieldPtrX[index]);
position[1] = (float) (deformationFieldPtrY[index]);
position[2] = (float) (deformationFieldPtrZ[index]);
previous[0] = cuda_reg_floor(position[0]);
previous[1] = cuda_reg_floor(position[1]);
previous[2] = cuda_reg_floor(position[2]);
relative[0] = (double)(position[0]) - (double)(previous[0]);
relative[1] = (double)(position[1]) - (double)(previous[1]);
relative[2] = (double)(position[2]) - (double)(previous[2]);
if (kernelType == 0) {
double xBasisIn[2], yBasisIn[2], zBasisIn[2];
interpNearestNeighKernel(relative[0], xBasisIn);
interpNearestNeighKernel(relative[1], yBasisIn);
interpNearestNeighKernel(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 2);
} else if (kernelType == 1) {
double xBasisIn[2], yBasisIn[2], zBasisIn[2];
interpLinearKernel(relative[0], xBasisIn);
interpLinearKernel(relative[1], yBasisIn);
interpLinearKernel(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 2);
} else if (kernelType == 4) {
double xBasisIn[6], yBasisIn[6], zBasisIn[6];
previous[0] -= SINC_KERNEL_RADIUS;
previous[1] -= SINC_KERNEL_RADIUS;
previous[2] -= SINC_KERNEL_RADIUS;
interpWindowedSincKernel(relative[0], xBasisIn);
interpWindowedSincKernel(relative[1], yBasisIn);
interpWindowedSincKernel(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 6);
} else {
double xBasisIn[4], yBasisIn[4], zBasisIn[4];
previous[0]--;
previous[1]--;
previous[2]--;
reg_getNiftynetCubicSpline(relative[0], xBasisIn);
reg_getNiftynetCubicSpline(relative[1], yBasisIn);
reg_getNiftynetCubicSpline(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 4);
}
resultIntensity[index] = (float)intensity;
}
index += blockDim.x * gridDim.x;
}
}
/* *************************************************************** */
void launchResample(const nifti_image *floatingImage,
const nifti_image *warpedImage,
const int interp,
const resampler_boundary_e boundary,
const float *floatingImage_d,
float *warpedImage_d,
const float *deformationFieldImage_d) {
const float paddingValue = reg_getPaddingValue<float>(boundary);
long targetVoxelNumber = (long) warpedImage->nx * warpedImage->ny * warpedImage->nz;
ulong2 voxelNumber = make_ulong2(warpedImage->nx * warpedImage->ny * warpedImage->nz, floatingImage->nx * floatingImage->ny * floatingImage->nz);
dim3 mygrid;
dim3 myblocks;
uint3 fi_xyz = make_uint3(floatingImage->nx, floatingImage->ny, floatingImage->nz);
uint2 wi_tu = make_uint2(warpedImage->nt, warpedImage->nu);
cudaCommon_computeGridConfiguration(myblocks, mygrid, targetVoxelNumber);
if (floatingImage->nz > 1 || warpedImage->nz > 1) {
switch (boundary) {
case resampler_boundary_e::CLAMPING:
hipLaunchKernelGGL(( ResampleImage3D<resampler_boundary_e::CLAMPING>) , dim3(mygrid), dim3(myblocks), 0, 0, floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
case resampler_boundary_e::REFLECTING:
hipLaunchKernelGGL(( ResampleImage3D<resampler_boundary_e::REFLECTING>) , dim3(mygrid), dim3(myblocks), 0, 0, floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
default:
hipLaunchKernelGGL(( ResampleImage3D<resampler_boundary_e::ZEROPAD>) , dim3(mygrid), dim3(myblocks), 0, 0, floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
}
} else{
switch (boundary) {
case resampler_boundary_e::CLAMPING:
hipLaunchKernelGGL(( ResampleImage2D<resampler_boundary_e::CLAMPING>) , dim3(mygrid), dim3(myblocks), 0, 0, floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
case resampler_boundary_e::REFLECTING:
hipLaunchKernelGGL(( ResampleImage2D<resampler_boundary_e::REFLECTING>) , dim3(mygrid), dim3(myblocks), 0, 0, floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
default:
hipLaunchKernelGGL(( ResampleImage2D<resampler_boundary_e::ZEROPAD>) , dim3(mygrid), dim3(myblocks), 0, 0, floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
}
}
#ifndef NDEBUG
NR_CUDA_CHECK_KERNEL(mygrid, myblocks)
#else
NR_CUDA_SAFE_CALL(hipDeviceSynchronize());
#endif
}
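/*
 * Editorial note: a hypothetical host-side call, assuming the device buffers
 * have already been populated (the pointer names below are illustrative, not
 * part of the original API):
 *
 *   // floating_d, warped_d, def_d are device buffers sized to the images
 *   launchResample(floatingImage, warpedImage,
 *                  1,                                  // trilinear
 *                  resampler_boundary_e::CLAMPING,
 *                  floating_d, warped_d, def_d);
 *
 * interp selects the kernel family (0 nearest neighbour, 1 linear, 4 windowed
 * sinc, anything else the NiftyNet cubic spline), and boundary selects between
 * padding and index-folding behaviour as dispatched above.
 */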
/* *************************************************************** */
|
0c66f4685aca8b0a79316b97b811d8128d40199c.cu
|
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "cuda.h"
#include"_reg_resampling.h"
#include"_reg_maths.h"
#include "resampleKernel.h"
#include "_reg_common_cuda.h"
#include"_reg_tools.h"
#include "interpolations.h"
#define SINC_KERNEL_RADIUS 3
#define SINC_KERNEL_SIZE SINC_KERNEL_RADIUS*2
/* *************************************************************** */
unsigned int min1(unsigned int a, unsigned int b)
{
return (a < b) ? a : b;
}
/* *************************************************************** */
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(DTYPE const* mat, DTYPE const* in, DTYPE *out)
{
out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]);
out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]);
out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]);
return;
}
/* *************************************************************** */
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]);
out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]);
out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]);
return;
}
/* *************************************************************** */
__device__ __inline__ int cuda_reg_floor(double a)
{
return (int) (floor(a));
}
/* *************************************************************** */
template<class FieldTYPE>
__device__ __inline__ void interpolantCubicSpline(FieldTYPE ratio, FieldTYPE *basis)
{
if (ratio < 0.0)
ratio = 0.0; //reg_rounding error
double FF = (double) ratio * ratio;
basis[0] = (FieldTYPE) ((ratio * (((double)2.0 - ratio) * ratio - (double)1.0)) / (double)2.0);
basis[1] = (FieldTYPE) ((FF * ((double)3.0 * ratio - 5.0) + 2.0) / (double)2.0);
basis[2] = (FieldTYPE) ((ratio * (((double)4.0 - (double)3.0 * ratio) * ratio + (double)1.0)) / (double)2.0);
basis[3] = (FieldTYPE) ((ratio - (double)1.0) * FF / (double)2.0);
}
/* *************************************************************** */
__inline__ __device__ void interpWindowedSincKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; //reg_rounding error
int j = 0;
double sum = 0.;
for (int i = -SINC_KERNEL_RADIUS; i < SINC_KERNEL_RADIUS; ++i) {
double x = relative - (double) (i);
if (x == 0.0)
basis[j] = 1.0;
else if (abs(x) >= (double) (SINC_KERNEL_RADIUS))
basis[j] = 0;
else {
double pi_x = M_PI * x;
basis[j] = (SINC_KERNEL_RADIUS) * sin(pi_x) * sin(pi_x / SINC_KERNEL_RADIUS) / (pi_x * pi_x);
}
sum += basis[j];
j++;
}
for (int i = 0; i < SINC_KERNEL_SIZE; ++i)
basis[i] /= sum;
}
/* *************************************************************** */
__inline__ __device__ void interpCubicSplineKernel(double relative, double *basis)
{
// if (relative < 0.0)
// relative = 0.0; //reg_rounding error
double FF = relative * relative;
basis[0] = (relative * ((2.0 - relative) * relative - 1.0)) / 2.0;
basis[1] = (FF * (3.0 * relative - 5.0) + 2.0) / 2.0;
basis[2] = (relative * ((4.0 - 3.0 * relative) * relative + 1.0)) / 2.0;
basis[3] = (relative - 1.0) * FF / 2.0;
}
/* *************************************************************** */
__inline__ __device__ void interpLinearKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; //reg_rounding error
basis[1] = relative;
basis[0] = 1.0 - relative;
}
/* *************************************************************** */
__inline__ __device__ void interpNearestNeighKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; //reg_rounding error
basis[0] = basis[1] = 0.0;
if (relative >= 0.5)
basis[1] = 1;
else
basis[0] = 1;
}
/* *************************************************************** */
__inline__ __device__ double interpLoop2D(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
int *previous,
uint3 fi_xyz,
const float paddingValue,
const unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int b = 0; b < kernel_size; b++) {
int Y = previous[1] + b;
bool yInBounds = -1 < Y && Y < fi_xyz.y;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
int X = previous[0] + a;
bool xInBounds = -1 < X && X < fi_xyz.x;
const unsigned int idx = Y * fi_xyz.x + X;
xTempNewValue += (xInBounds && yInBounds) ? floatingIntensity[idx] * xBasis[a] : paddingValue * xBasis[a];
}
intensity += xTempNewValue * yBasis[b];
}
return intensity;
}
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__inline__ __device__ double interpLoop2DBoundary(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
int *previous,
uint3 fi_xyz,
const unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int b = 0; b < kernel_size; b++) {
const int offset_x = reg_applyBoundary<tBoundary>(previous[1] + b, fi_xyz.y)*fi_xyz.x;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
const unsigned int idx = offset_x + reg_applyBoundary<tBoundary>(previous[0] + a, fi_xyz.x);
xTempNewValue += floatingIntensity[idx]*xBasis[a];
}
intensity += xTempNewValue*yBasis[b];
}
return intensity;
}
/* *************************************************************** */
__inline__ __device__ double interpLoop3D(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
const double* zBasis,
int *previous,
uint3 fi_xyz,
float paddingValue,
unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int c = 0; c < kernel_size; c++) {
int Z = previous[2] + c;
bool zInBounds = -1 < Z && Z < fi_xyz.z;
double yTempNewValue = 0.0;
for (int b = 0; b < kernel_size; b++) {
int Y = previous[1] + b;
bool yInBounds = -1 < Y && Y < fi_xyz.y;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
int X = previous[0] + a;
bool xInBounds = -1 < X && X < fi_xyz.x;
const unsigned int idx = Z * fi_xyz.x * fi_xyz.y + Y * fi_xyz.x + X;
xTempNewValue += (xInBounds && yInBounds && zInBounds) ? floatingIntensity[idx] * xBasis[a] : paddingValue * xBasis[a];
}
yTempNewValue += xTempNewValue * yBasis[b];
}
intensity += yTempNewValue * zBasis[c];
}
return intensity;
}
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__inline__ __device__ double interpLoop3DBoundary(const float* floatingIntensity,
const double* xBasis,
const double* yBasis,
const double* zBasis,
int *previous,
uint3 fi_xyz,
unsigned int kernel_size)
{
double intensity = (double)(0.0);
for (int c = 0; c < kernel_size; c++) {
const int offset_y = reg_applyBoundary<tBoundary>(previous[2] + c, fi_xyz.z)*fi_xyz.y;
double yTempNewValue = 0.0;
for (int b = 0; b < kernel_size; b++) {
const int offset_x = (offset_y + reg_applyBoundary<tBoundary>(previous[1] + b, fi_xyz.y))*fi_xyz.x;
double xTempNewValue = 0.0;
for (int a = 0; a < kernel_size; a++) {
const unsigned int idx = offset_x + reg_applyBoundary<tBoundary>(previous[0] + a, fi_xyz.x);
xTempNewValue += floatingIntensity[idx]*xBasis[a];
}
yTempNewValue += xTempNewValue*yBasis[b];
}
intensity += yTempNewValue*zBasis[c];
}
return intensity;
}
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__global__ void ResampleImage2D(const float* floatingImage,
const float* deformationField,
float* warpedImage,
ulong2 voxelNumber,
uint3 fi_xyz,
uint2 wi_tu,
const float paddingValue,
const int kernelType)
{
const float *sourceIntensityPtr = (floatingImage);
float *resultIntensityPtr = (warpedImage);
const float *deformationFieldPtrX = (deformationField);
const float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
long index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < voxelNumber.x) {
for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {
float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
const float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
double intensity = paddingValue;
int previous[3];
float position[3];
double relative[3];
auto launchInterpLoop = [&](const double xBasis[], const double yBasis[], const int kernelSize) {
if (resampler_boundary_e(tBoundary) != resampler_boundary_e::ZEROPAD && resampler_boundary_e(tBoundary) != resampler_boundary_e::NANPAD) {
intensity = interpLoop2DBoundary<tBoundary>(floatingIntensity, xBasis, yBasis, previous, fi_xyz, kernelSize);
} else {
intensity = interpLoop2D(floatingIntensity, xBasis, yBasis, previous, fi_xyz, paddingValue, kernelSize);
}
};
position[0] = (float)(deformationFieldPtrX[index]);
position[1] = (float)(deformationFieldPtrY[index]);
previous[0] = cuda_reg_floor(position[0]);
previous[1] = cuda_reg_floor(position[1]);
relative[0] = (double)(position[0]) - (double)(previous[0]);
relative[1] = (double)(position[1]) - (double)(previous[1]);
if (kernelType == 0) {
double xBasisIn[2], yBasisIn[2];
interpNearestNeighKernel(relative[0], xBasisIn);
interpNearestNeighKernel(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 2);
}
else if (kernelType == 1) {
double xBasisIn[2], yBasisIn[2];
interpLinearKernel(relative[0], xBasisIn);
interpLinearKernel(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 2);
}
else if (kernelType == 4) {
double xBasisIn[6], yBasisIn[6];
previous[0] -= SINC_KERNEL_RADIUS;
previous[1] -= SINC_KERNEL_RADIUS;
previous[2] -= SINC_KERNEL_RADIUS;
interpWindowedSincKernel(relative[0], xBasisIn);
interpWindowedSincKernel(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 6);
}
else {
double xBasisIn[4], yBasisIn[4];
previous[0]--;
previous[1]--;
previous[2]--;
reg_getNiftynetCubicSpline(relative[0], xBasisIn);
reg_getNiftynetCubicSpline(relative[1], yBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, 4);
}
resultIntensity[index] = (float)intensity;
}
index += blockDim.x * gridDim.x;
}
}
/* *************************************************************** */
template <const resampler_boundary_e tBoundary>
__global__ void ResampleImage3D(const float* floatingImage,
const float* deformationField,
float* warpedImage,
const ulong2 voxelNumber,
uint3 fi_xyz,
uint2 wi_tu,
const float paddingValue,
int kernelType)
{
const float *sourceIntensityPtr = (floatingImage);
float *resultIntensityPtr = (warpedImage);
const float *deformationFieldPtrX = (deformationField);
const float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
const float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber.x];
long index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < voxelNumber.x) {
for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {
float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
const float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
double intensity = paddingValue;
int previous[3];
float position[3];
double relative[3];
auto launchInterpLoop = [&](const double xBasisIn[], const double yBasisIn[], const double zBasisIn[], const int kernelSize) {
if (resampler_boundary_e(tBoundary) != resampler_boundary_e::ZEROPAD && resampler_boundary_e(tBoundary) != resampler_boundary_e::NANPAD) {
intensity = interpLoop3DBoundary<tBoundary>(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, kernelSize);
} else {
intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, kernelSize);
}
};
position[0] = (float) (deformationFieldPtrX[index]);
position[1] = (float) (deformationFieldPtrY[index]);
position[2] = (float) (deformationFieldPtrZ[index]);
previous[0] = cuda_reg_floor(position[0]);
previous[1] = cuda_reg_floor(position[1]);
previous[2] = cuda_reg_floor(position[2]);
relative[0] = (double)(position[0]) - (double)(previous[0]);
relative[1] = (double)(position[1]) - (double)(previous[1]);
relative[2] = (double)(position[2]) - (double)(previous[2]);
if (kernelType == 0) {
double xBasisIn[2], yBasisIn[2], zBasisIn[2];
interpNearestNeighKernel(relative[0], xBasisIn);
interpNearestNeighKernel(relative[1], yBasisIn);
interpNearestNeighKernel(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 2);
} else if (kernelType == 1) {
double xBasisIn[2], yBasisIn[2], zBasisIn[2];
interpLinearKernel(relative[0], xBasisIn);
interpLinearKernel(relative[1], yBasisIn);
interpLinearKernel(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 2);
} else if (kernelType == 4) {
double xBasisIn[6], yBasisIn[6], zBasisIn[6];
previous[0] -= SINC_KERNEL_RADIUS;
previous[1] -= SINC_KERNEL_RADIUS;
previous[2] -= SINC_KERNEL_RADIUS;
interpWindowedSincKernel(relative[0], xBasisIn);
interpWindowedSincKernel(relative[1], yBasisIn);
interpWindowedSincKernel(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 6);
} else {
double xBasisIn[4], yBasisIn[4], zBasisIn[4];
previous[0]--;
previous[1]--;
previous[2]--;
reg_getNiftynetCubicSpline(relative[0], xBasisIn);
reg_getNiftynetCubicSpline(relative[1], yBasisIn);
reg_getNiftynetCubicSpline(relative[2], zBasisIn);
launchInterpLoop(xBasisIn, yBasisIn, zBasisIn, 4);
}
resultIntensity[index] = (float)intensity;
}
index += blockDim.x * gridDim.x;
}
}
/* *************************************************************** */
void launchResample(const nifti_image *floatingImage,
const nifti_image *warpedImage,
const int interp,
const resampler_boundary_e boundary,
const float *floatingImage_d,
float *warpedImage_d,
const float *deformationFieldImage_d) {
const float paddingValue = reg_getPaddingValue<float>(boundary);
long targetVoxelNumber = (long) warpedImage->nx * warpedImage->ny * warpedImage->nz;
ulong2 voxelNumber = make_ulong2(warpedImage->nx * warpedImage->ny * warpedImage->nz, floatingImage->nx * floatingImage->ny * floatingImage->nz);
dim3 mygrid;
dim3 myblocks;
uint3 fi_xyz = make_uint3(floatingImage->nx, floatingImage->ny, floatingImage->nz);
uint2 wi_tu = make_uint2(warpedImage->nt, warpedImage->nu);
cudaCommon_computeGridConfiguration(myblocks, mygrid, targetVoxelNumber);
if (floatingImage->nz > 1 || warpedImage->nz > 1) {
switch (boundary) {
case resampler_boundary_e::CLAMPING:
ResampleImage3D<resampler_boundary_e::CLAMPING> <<<mygrid, myblocks>>>(floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
case resampler_boundary_e::REFLECTING:
ResampleImage3D<resampler_boundary_e::REFLECTING> <<<mygrid, myblocks>>>(floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
default:
ResampleImage3D<resampler_boundary_e::ZEROPAD> <<<mygrid, myblocks>>>(floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
}
} else{
switch (boundary) {
case resampler_boundary_e::CLAMPING:
ResampleImage2D<resampler_boundary_e::CLAMPING> <<<mygrid, myblocks>>>(floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
case resampler_boundary_e::REFLECTING:
ResampleImage2D<resampler_boundary_e::REFLECTING> <<<mygrid, myblocks>>>(floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
break;
default:
ResampleImage2D<resampler_boundary_e::ZEROPAD> <<<mygrid, myblocks>>>(floatingImage_d,
deformationFieldImage_d,
warpedImage_d,
voxelNumber,
fi_xyz,
wi_tu,
paddingValue,
interp);
}
}
#ifndef NDEBUG
NR_CUDA_CHECK_KERNEL(mygrid, myblocks)
#else
NR_CUDA_SAFE_CALL(cudaThreadSynchronize());
#endif
}
/* *************************************************************** */
|
68f957520dd8ccb3fc18b55f0e6005d94b59f431.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T>
__global__ void _Sigmoid(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = T(1) / (T(1) + exp(-x[i]));
}
}
template <>
__global__ void _Sigmoid<half>(const int N, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(1.f / (1.f + exp(-__half2float(x[i]))));
}
}
template <>
__global__ void _Sigmoid<half2>(const int N, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] =
__floats2half2_rn(1.f / (1.f + exp(-val.x)), 1.f / (1.f + exp(-val.y)));
}
}
template <typename T>
__global__ void _SigmoidGrad(const int N, const T* dy, const T* y, T* dx) {
CUDA_1D_KERNEL_LOOP(i, N) {
dx[i] = dy[i] * __ldg(y + i) * (1 - __ldg(y + i));
}
}
template <>
__global__ void
_SigmoidGrad<half>(const int N, const half* dy, const half* y, half* dx) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float val = __half2float(y[i]);
dx[i] = __float2half(__half2float(dy[i]) * val * (1.f - val));
}
} // SigmoidGrad
template <>
__global__ void
_SigmoidGrad<half2>(const int N, const half2* dy, const half2* y, half2* dx) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(y[i]);
const float2 grad = __half22float2(dy[i]);
dx[i] = __floats2half2_rn(
grad.x * val.x * (1.f - val.x), grad.y * val.y * (1.f - val.y));
}
} // SigmoidGrad
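/*
 * Editorial note: all _SigmoidGrad variants rely on the identity
 *   d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)) = y * (1 - y),
 * so the backward pass only needs the saved output y and the incoming
 * gradient dy, never the original input x.
 */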
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void Sigmoid<float16, CUDAContext>(
const int N,
const float16* x,
float16* y,
CUDAContext* ctx) {
if ((N & 1) == 0) {
hipLaunchKernelGGL(( _Sigmoid), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N >> 1, reinterpret_cast<const half2*>(x), reinterpret_cast<half2*>(y));
} else {
hipLaunchKernelGGL(( _Sigmoid), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y));
}
}
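/*
 * Editorial note: for float16 the launcher checks (N & 1) == 0 and, when the
 * element count is even, reinterprets the buffers as half2 so each thread
 * processes two packed values per loop iteration over N >> 1 elements;
 * odd-sized tensors fall back to the scalar half kernel. CUDA_BLOCKS and
 * CUDA_THREADS are the framework's usual grid-sizing helpers.
 */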
template <>
void SigmoidGrad<float16, CUDAContext>(
const int N,
const float16* dy,
const float16* y,
float16* dx,
CUDAContext* ctx) {
if ((N & 1) == 0) {
hipLaunchKernelGGL(( _SigmoidGrad), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N >> 1,
reinterpret_cast<const half2*>(dy),
reinterpret_cast<const half2*>(y),
reinterpret_cast<half2*>(dx));
} else {
hipLaunchKernelGGL(( _SigmoidGrad), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N,
reinterpret_cast<const half*>(dy),
reinterpret_cast<const half*>(y),
reinterpret_cast<half*>(dx));
}
} // SigmoidGrad
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void Sigmoid<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _Sigmoid), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, x, y); \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void SigmoidGrad<T, CUDAContext>( \
const int N, const T* dy, const T* y, T* dx, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _SigmoidGrad), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, dy, y, dx); \
}
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_ROCM
|
68f957520dd8ccb3fc18b55f0e6005d94b59f431.cu
|
#ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T>
__global__ void _Sigmoid(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = T(1) / (T(1) + exp(-x[i]));
}
}
template <>
__global__ void _Sigmoid<half>(const int N, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(1.f / (1.f + exp(-__half2float(x[i]))));
}
}
template <>
__global__ void _Sigmoid<half2>(const int N, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] =
__floats2half2_rn(1.f / (1.f + exp(-val.x)), 1.f / (1.f + exp(-val.y)));
}
}
template <typename T>
__global__ void _SigmoidGrad(const int N, const T* dy, const T* y, T* dx) {
CUDA_1D_KERNEL_LOOP(i, N) {
dx[i] = dy[i] * __ldg(y + i) * (1 - __ldg(y + i));
}
}
template <>
__global__ void
_SigmoidGrad<half>(const int N, const half* dy, const half* y, half* dx) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float val = __half2float(y[i]);
dx[i] = __float2half(__half2float(dy[i]) * val * (1.f - val));
}
} // SigmoidGrad
template <>
__global__ void
_SigmoidGrad<half2>(const int N, const half2* dy, const half2* y, half2* dx) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(y[i]);
const float2 grad = __half22float2(dy[i]);
dx[i] = __floats2half2_rn(
grad.x * val.x * (1.f - val.x), grad.y * val.y * (1.f - val.y));
}
} // SigmoidGrad
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void Sigmoid<float16, CUDAContext>(
const int N,
const float16* x,
float16* y,
CUDAContext* ctx) {
if ((N & 1) == 0) {
_Sigmoid<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N >> 1, reinterpret_cast<const half2*>(x), reinterpret_cast<half2*>(y));
} else {
_Sigmoid<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y));
}
}
template <>
void SigmoidGrad<float16, CUDAContext>(
const int N,
const float16* dy,
const float16* y,
float16* dx,
CUDAContext* ctx) {
if ((N & 1) == 0) {
_SigmoidGrad<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N >> 1,
reinterpret_cast<const half2*>(dy),
reinterpret_cast<const half2*>(y),
reinterpret_cast<half2*>(dx));
} else {
_SigmoidGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N,
reinterpret_cast<const half*>(dy),
reinterpret_cast<const half*>(y),
reinterpret_cast<half*>(dx));
}
} // SigmoidGrad
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void Sigmoid<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* ctx) { \
_Sigmoid<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, x, y); \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void SigmoidGrad<T, CUDAContext>( \
const int N, const T* dy, const T* y, T* dx, CUDAContext* ctx) { \
_SigmoidGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, dy, y, dx); \
}
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_CUDA
|
1928494ed800fbfbf282501a45a7c093b4735ee0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
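// Editorial note: each output pixel is mapped back to a real-valued source
// coordinate via area_pixel_compute_source_index, and the surrounding 4x4
// input neighbourhood is blended with cubic convolution weights: four
// horizontal cubic_interp1d passes (one per row, at offsets in_y - 1 + k)
// followed by one vertical pass over the four row results. Out-of-range taps
// are clamped to the image border by upsample_get_value_bounded.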
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// Special case: input and output are the same size, just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
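// Editorial note: the backward kernel is the adjoint of the forward blend.
// Each output-gradient value is scattered into the 4x4 input neighbourhood
// weighted by the outer product y_coeffs[i] * x_coeffs[j] of the same cubic
// coefficients, and upsample_increment_value_bounded accumulates with an
// atomic add, which is why this path is flagged as nondeterministic in
// upsample_bicubic2d_backward_out_cuda below.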
static void upsample_bicubic2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
output.zero_();
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_output_elements = output_height * output_width;
const int max_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_output_elements, max_threads)),
dim3(max_threads),
0,
stream,
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_bicubic2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bicubic2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_bicubic2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
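// Rough usage sketch (hypothetical sizes; in practice this overload is normally reached
// through the dispatcher as at::upsample_bicubic2d rather than called directly):
//   at::Tensor x = at::rand({1, 3, 32, 32}, at::kCUDA);
//   at::Tensor y = at::native::upsample_bicubic2d_cuda(
//       x, {64, 64}, /*align_corners=*/false, c10::nullopt, c10::nullopt);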
Tensor& upsample_bicubic2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda");
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_cuda");
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_bicubic2d_out_cuda_template(output, input, osize, align_corners, scale_h, scale_w);
return output;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, align_corners, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
1928494ed800fbfbf282501a45a7c093b4735ee0.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
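    // Separable bicubic: interpolate each of the 4 neighboring rows along x with
    // cubic_interp1d, then interpolate the 4 row results along y.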
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
  // special case: input and output are the same size, just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
static void upsample_bicubic2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
output.zero_();
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_output_elements = output_height * output_width;
const int max_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_output_elements, max_threads),
max_threads,
0,
stream>>>(
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_bicubic2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bicubic2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_bicubic2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bicubic2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda");
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_cuda");
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_bicubic2d_out_cuda_template(output, input, osize, align_corners, scale_h, scale_w);
return output;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, align_corners, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
ea2a25df1256c9fc584e92729e0fb62b05337fa3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchmatch.cuh"
#include "DeepAnalogy.cuh"
#include "WLS.h"
#include "Deconv.h"
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
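// Channel-wise L2 normalization of a C x H x W feature map on the GPU:
// dst = src / ||src(:, y, x)||_2 for every spatial location. When `smooth` is non-NULL
// it receives the per-pixel squared magnitude rescaled to [0,1], which is later used
// as the response map for blending.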
__host__ void norm(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
hipMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
//calculate dis (per-pixel L2 magnitude)
float*sum;
float* ones;
hipMalloc(&sum, dim.height*dim.width*sizeof(float));
hipMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
hipMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
hipMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), hipMemcpyDeviceToDevice);
int index;
float minv, maxv;
hipblasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
hipMemcpy(&minv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
hipblasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
hipMemcpy(&maxv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
hipFree(x2);
hipFree(ones);
hipFree(dis);
hipFree(sum);
}
DeepAnalogy::DeepAnalogy(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
file_A = "";
file_BP = "";
path_output = "";
path_model = "";
}
DeepAnalogy::~DeepAnalogy(){
}
void DeepAnalogy::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogy::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogy::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogy::SetModel(string path){
path_model =path;
}
void DeepAnalogy::SetA(string f_a){
file_A = f_a;
}
void DeepAnalogy::SetBPrime(string f_bp){
file_BP = f_bp;
}
void DeepAnalogy::SetOutputDir(string f_o){
path_output = f_o;
}
void DeepAnalogy::SetGPU(int no){
hipSetDevice(no);
}
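// Minimal usage sketch (hypothetical paths; assumes Caffe is initialized and the VGG-19
// prototxt/caffemodel live under the model directory):
//   DeepAnalogy dp;
//   dp.SetModel("models/");
//   dp.SetA("A.png");
//   dp.SetBPrime("BP.png");
//   dp.SetOutputDir("output/");
//   dp.SetGPU(0);
//   dp.SetRatio(1.0f);
//   dp.SetBlendWeight(3);
//   dp.LoadInputs();
//   dp.ComputeAnn();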
void DeepAnalogy::LoadInputs(){
float ratio;
Mat ori_AL = imread(file_A);
Mat ori_BPL = imread(file_BP);
if (ori_AL.empty() || ori_BPL.empty())
{
cout << "image cannot read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
}
void DeepAnalogy::ComputeAnn() {
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
//scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
//load caffe
::google::InitGoogleLogging("deepanalogy");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
std::vector<float *> data_A, data_A1;
data_A.resize(params.layers.size());
data_A1.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A1, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
hipMalloc(¶ms_device_AB, param_size * sizeof(int));
hipMalloc(¶ms_device_BA, param_size * sizeof(int));
hipMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
hipMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
hipMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
hipMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
//feature match
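// Coarse-to-fine loop over the VGG-19 layers listed above: at each level the A->B' and
// B'->A nearest-neighbor fields are refined with PatchMatch on blended, L2-normalized
// features, and avg_vote + deconv propagate the warped features to a later, finer layer
// to warm-start its estimation.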
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
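// params_host layout: [0]=channels, [1]=source height, [2]=source width,
// [3]=target height, [4]=target width, [5]=patch size, [6]=PatchMatch iterations,
// [7]=random-search range for this layer.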
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsample the previous layer's NNF; note the grid here already covers this layer's (larger) resolution
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
}
//normalize the two feature maps
float *Ndata_A, *Ndata_A1, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
hipMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_A1, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
hipMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_A1[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm(Ndata_A1, data_A1[curr_layer], NULL, data_A_size[curr_layer]);
norm(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_A1, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_A1, ann_device_BA, annd_device_BA, params_device_BA);
hipFree(Ndata_A);
hipFree(Ndata_A1);
hipFree(Ndata_B);
hipFree(Ndata_BP);
hipFree(response_A);
hipFree(response_BP);
//deconv
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_A1[next_layer], data_A_size[next_layer]);
hipFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
hipFree(target);
}
}
//upsample
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
//upsample
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
//reconstruct images and resize back to the original sizes
Mat flow, result_AB, result_BA, err, out, normal;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
sprintf(fname, "resultAB.png");
imwrite(path_output + fname, out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
sprintf(fname, "resultBA.png");
imwrite(path_output + fname, out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
sprintf(fname, "refineAB.png");
refine_AB.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
sprintf(fname, "refineBA.png");
refine_BA.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
}
}
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB.txt");
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA.txt");
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
hipFree(params_device_AB);
hipFree(ann_device_AB);
hipFree(annd_device_AB);
hipFree(params_device_BA);
hipFree(ann_device_BA);
hipFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
hipFree(data_A[i]);
hipFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
|
ea2a25df1256c9fc584e92729e0fb62b05337fa3.cu
|
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchmatch.cuh"
#include "DeepAnalogy.cuh"
#include "WLS.h"
#include "Deconv.h"
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
__host__ void norm(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
cudaMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
//calculate dis (per-pixel L2 magnitude)
float*sum;
float* ones;
cudaMalloc(&sum, dim.height*dim.width*sizeof(float));
cudaMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
cudaMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
cudaMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), cudaMemcpyDeviceToDevice);
int index;
float minv, maxv;
cublasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&minv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
cublasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&maxv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
cudaFree(x2);
cudaFree(ones);
cudaFree(dis);
cudaFree(sum);
}
DeepAnalogy::DeepAnalogy(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
file_A = "";
file_BP = "";
path_output = "";
path_model = "";
}
DeepAnalogy::~DeepAnalogy(){
}
void DeepAnalogy::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogy::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogy::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogy::SetModel(string path){
path_model =path;
}
void DeepAnalogy::SetA(string f_a){
file_A = f_a;
}
void DeepAnalogy::SetBPrime(string f_bp){
file_BP = f_bp;
}
void DeepAnalogy::SetOutputDir(string f_o){
path_output = f_o;
}
void DeepAnalogy::SetGPU(int no){
cudaSetDevice(no);
}
void DeepAnalogy::LoadInputs(){
float ratio;
Mat ori_AL = imread(file_A);
Mat ori_BPL = imread(file_BP);
if (ori_AL.empty() || ori_BPL.empty())
{
cout << "image cannot read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
}
void DeepAnalogy::ComputeAnn() {
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
//scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
//load caffe
::google::InitGoogleLogging("deepanalogy");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
std::vector<float *> data_A, data_A1;
data_A.resize(params.layers.size());
data_A1.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A1, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
cudaMalloc(¶ms_device_AB, param_size * sizeof(int));
cudaMalloc(¶ms_device_BA, param_size * sizeof(int));
cudaMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
cudaMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
cudaMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
cudaMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsample the previous layer's NNF; note the grid here already covers this layer's (larger) resolution
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
}
//normalize the two feature maps
float *Ndata_A, *Ndata_A1, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
cudaMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_A1, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
cudaMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_A1[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm(Ndata_A1, data_A1[curr_layer], NULL, data_A_size[curr_layer]);
norm(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_A1, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_A1, ann_device_BA, annd_device_BA, params_device_BA);
cudaFree(Ndata_A);
cudaFree(Ndata_A1);
cudaFree(Ndata_B);
cudaFree(Ndata_BP);
cudaFree(response_A);
cudaFree(response_BP);
//deconv
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_A1[next_layer], data_A_size[next_layer]);
cudaFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
cudaFree(target);
}
}
//upsample
int curr_layer = numlayer - 1;
{
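// Final level: upsample both NNFs to full image resolution, copy them back to the host,
// reconstruct resultAB/resultBA with reconstruct_dflow/reconstruct_avg, and optionally
// refine them with WLS-based photo transfer before writing the results to disk.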
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
//upsample
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
//reconstruct the outputs and rescale them back to the original image size
Mat flow, result_AB, result_BA, err, out, normal;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
sprintf(fname, "resultAB.png");
imwrite(path_output + fname, out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
sprintf(fname, "resultBA.png");
imwrite(path_output + fname, out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
sprintf(fname, "refineAB.png");
refine_AB.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
sprintf(fname, "refineBA.png");
refine_BA.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
}
}
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB.txt");
output1.open(path_output + fname);
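// each ANN entry packs the matched (x, y) position into one unsigned int (see INT_TO_X / INT_TO_Y); the text file stores per-pixel displacements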
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA.txt");
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
cudaFree(params_device_AB);
cudaFree(ann_device_AB);
cudaFree(annd_device_AB);
cudaFree(params_device_BA);
cudaFree(ann_device_BA);
cudaFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
cudaFree(data_A[i]);
cudaFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
|
f09cd46d4b2220111ef2d20254cae175f211fd53.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone <[email protected]>
* http://www.ks.uiuc.edu/~johns/
*
* Coulombic potential grid calculation microbenchmark based on the time
* consuming portions of the 'cionize' ion placement tool.
*/
#ifdef __MCUDA__
#include <mcuda.h>
#else
#include <hip/hip_runtime.h>
#endif
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuenergy.h"
/* initatoms()
* Store a pseudorandom arrangement of point charges in *atombuf.
*/
static int
initatoms(double **atombuf, int count, dim3 volsize, double gridspacing) {
dim3 size;
int i;
double *atoms;
srand(54321); // Ensure that atom placement is repeatable
atoms = (double *) malloc(count * 4 * sizeof(double));
*atombuf = atoms;
// compute grid dimensions in angstroms
size.x = gridspacing * volsize.x;
size.y = gridspacing * volsize.y;
size.z = gridspacing * volsize.z;
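// each atom is stored as an interleaved (x, y, z, charge) quadruple, placed uniformly at random inside the grid volume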
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr ] = (rand() / (double) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (double) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (double) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (double) RAND_MAX) * 2.0) - 1.0; // charge
}
return 0;
}
/* writeenergy()
* Write part of the energy array to an output file for verification.
*/
static int
writeenergy(char *filename, double *energy, dim3 volsize)
{
FILE *outfile;
int x, y;
outfile = fopen(filename, "w");
if (outfile == NULL) {
fputs("Cannot open output file\n", stderr);
return -1;
}
/* Print the execution parameters */
fprintf(outfile, "%d %d %d %d\n", volsize.x, volsize.y, volsize.z, ATOMCOUNT);
/* Print a checksum */
{
double sum = 0.0;
for (y = 0; y < volsize.y; y++) {
for (x = 0; x < volsize.x; x++) {
double t = energy[y*volsize.x+x];
t = fmax(-20.0, fmin(20.0, t));
sum += t;
}
}
fprintf(outfile, "%.6E\n", sum);
}
/* Print several rows of the computed data */
//for (y = 0; y < 17; y++) {
for (y = 0; y < volsize.y; y++) { //need to print all
for (x = 0; x < volsize.x; x++) {
int addr = y * volsize.x + x;
fprintf(outfile, "%.6E ", energy[addr]);
}
fprintf(outfile, "\n");
}
fclose(outfile);
return 0;
}
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
double *energy = NULL; // Output of device calculation
double *atoms = NULL;
dim3 volsize, Gsz, Bsz;
// int final_iteration_count;
// number of atoms to simulate
int atomcount = ATOMCOUNT;
// voxel spacing
const double gridspacing = 0.1;
// Size of buffer on GPU
int volmemsz;
printf("CUDA accelerated coulombic potential microbenchmark\n");
printf("Original version by John E. Stone <[email protected]>\n");
printf("This version maintained by Chris Rodrigues\n");
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if (parameters->inpFiles[0]) {
fputs("No input files expected\n", stderr);
return -1;
}
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
// setup energy grid size
volsize.x = VOLSIZEX;
volsize.y = VOLSIZEY;
volsize.z = 1;
// setup CUDA grid and block sizes
Bsz.x = BLOCKSIZEX; // each thread does multiple Xs
Bsz.y = BLOCKSIZEY;
Bsz.z = 1;
Gsz.x = volsize.x / (Bsz.x * UNROLLX); // each thread does multiple Xs
Gsz.y = volsize.y / Bsz.y;
Gsz.z = volsize.z / Bsz.z;
#if 1
printf("Grid size: %d x %d x %d\n", volsize.x, volsize.y, volsize.z);
printf("Running kernel(atoms:%d, gridspacing %g, z %d)\n", atomcount, gridspacing, 0);
#endif
// allocate and initialize atom coordinates and charges
if (initatoms(&atoms, atomcount, volsize, gridspacing))
return -1;
// allocate and initialize the GPU output array
volmemsz = sizeof(double) * volsize.x * volsize.y * volsize.z;
// Main computation
{
double *d_output = NULL; // Output on device
int iterations=0;
int atomstart;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMalloc((void**)&d_output, volmemsz);
CUERR // check and clear any existing errors
hipMemset(d_output, 0, volmemsz);
CUERR // check and clear any existing errors
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
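// walk the atom list in chunks of at most MAXATOMS: each chunk is staged to the GPU constant buffer by copyatomstoconstbuf() and its contribution is accumulated into d_output by the cenergy kernel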
for (atomstart=0; atomstart<atomcount; atomstart+=MAXATOMS) {
int atomsremaining = atomcount - atomstart;
int runatoms = (atomsremaining > MAXATOMS) ? MAXATOMS : atomsremaining;
iterations++;
// copy the atoms to the GPU
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
if (copyatomstoconstbuf(atoms + 4*atomstart, runatoms, 0*gridspacing))
return -1;
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
// RUN the kernel...
hipLaunchKernelGGL(( cenergy), dim3(Gsz), dim3(Bsz), 0, 0, runatoms, 0.1, d_output);
CUERR // check and clear any existing errors
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
// final_iteration_count = iterations;
}
#if 0
printf("Done\n");
#endif
// Copy the GPU output data back to the host and use/store it..
energy = (double *) malloc(volmemsz);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMemcpy(energy, d_output, volmemsz, hipMemcpyDeviceToHost);
CUERR // check and clear any existing errors
hipFree(d_output);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
/* Print a subset of the results to a file */
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
if (writeenergy(parameters->outFile, energy, volsize) == -1)
return -1;
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
free(atoms);
free(energy);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
|
f09cd46d4b2220111ef2d20254cae175f211fd53.cu
|
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone <[email protected]>
* http://www.ks.uiuc.edu/~johns/
*
* Coulombic potential grid calculation microbenchmark based on the time
* consuming portions of the 'cionize' ion placement tool.
*/
#ifdef __MCUDA__
#include <mcuda.h>
#else
#include <cuda.h>
#endif
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuenergy.h"
/* initatoms()
* Store a pseudorandom arrangement of point charges in *atombuf.
*/
static int
initatoms(double **atombuf, int count, dim3 volsize, double gridspacing) {
dim3 size;
int i;
double *atoms;
srand(54321); // Ensure that atom placement is repeatable
atoms = (double *) malloc(count * 4 * sizeof(double));
*atombuf = atoms;
// compute grid dimensions in angstroms
size.x = gridspacing * volsize.x;
size.y = gridspacing * volsize.y;
size.z = gridspacing * volsize.z;
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr ] = (rand() / (double) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (double) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (double) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (double) RAND_MAX) * 2.0) - 1.0; // charge
}
return 0;
}
/* writeenergy()
* Write part of the energy array to an output file for verification.
*/
static int
writeenergy(char *filename, double *energy, dim3 volsize)
{
FILE *outfile;
int x, y;
outfile = fopen(filename, "w");
if (outfile == NULL) {
fputs("Cannot open output file\n", stderr);
return -1;
}
/* Print the execution parameters */
fprintf(outfile, "%d %d %d %d\n", volsize.x, volsize.y, volsize.z, ATOMCOUNT);
/* Print a checksum */
{
double sum = 0.0;
for (y = 0; y < volsize.y; y++) {
for (x = 0; x < volsize.x; x++) {
double t = energy[y*volsize.x+x];
t = fmax(-20.0, fmin(20.0, t));
sum += t;
}
}
fprintf(outfile, "%.6E\n", sum);
}
/* Print several rows of the computed data */
//for (y = 0; y < 17; y++) {
for (y = 0; y < volsize.y; y++) { //need to print all
for (x = 0; x < volsize.x; x++) {
int addr = y * volsize.x + x;
fprintf(outfile, "%.6E ", energy[addr]);
}
fprintf(outfile, "\n");
}
fclose(outfile);
return 0;
}
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
double *energy = NULL; // Output of device calculation
double *atoms = NULL;
dim3 volsize, Gsz, Bsz;
// int final_iteration_count;
// number of atoms to simulate
int atomcount = ATOMCOUNT;
// voxel spacing
const double gridspacing = 0.1;
// Size of buffer on GPU
int volmemsz;
printf("CUDA accelerated coulombic potential microbenchmark\n");
printf("Original version by John E. Stone <[email protected]>\n");
printf("This version maintained by Chris Rodrigues\n");
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if (parameters->inpFiles[0]) {
fputs("No input files expected\n", stderr);
return -1;
}
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
// setup energy grid size
volsize.x = VOLSIZEX;
volsize.y = VOLSIZEY;
volsize.z = 1;
// setup CUDA grid and block sizes
Bsz.x = BLOCKSIZEX; // each thread does multiple Xs
Bsz.y = BLOCKSIZEY;
Bsz.z = 1;
Gsz.x = volsize.x / (Bsz.x * UNROLLX); // each thread does multiple Xs
Gsz.y = volsize.y / Bsz.y;
Gsz.z = volsize.z / Bsz.z;
#if 1
printf("Grid size: %d x %d x %d\n", volsize.x, volsize.y, volsize.z);
printf("Running kernel(atoms:%d, gridspacing %g, z %d)\n", atomcount, gridspacing, 0);
#endif
// allocate and initialize atom coordinates and charges
if (initatoms(&atoms, atomcount, volsize, gridspacing))
return -1;
// allocate and initialize the GPU output array
volmemsz = sizeof(double) * volsize.x * volsize.y * volsize.z;
// Main computation
{
double *d_output = NULL; // Output on device
int iterations=0;
int atomstart;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMalloc((void**)&d_output, volmemsz);
CUERR // check and clear any existing errors
cudaMemset(d_output, 0, volmemsz);
CUERR // check and clear any existing errors
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
for (atomstart=0; atomstart<atomcount; atomstart+=MAXATOMS) {
int atomsremaining = atomcount - atomstart;
int runatoms = (atomsremaining > MAXATOMS) ? MAXATOMS : atomsremaining;
iterations++;
// copy the atoms to the GPU
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
if (copyatomstoconstbuf(atoms + 4*atomstart, runatoms, 0*gridspacing))
return -1;
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
// RUN the kernel...
cenergy<<<Gsz, Bsz, 0>>>(runatoms, 0.1, d_output);
CUERR // check and clear any existing errors
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
// final_iteration_count = iterations;
}
#if 0
printf("Done\n");
#endif
// Copy the GPU output data back to the host and use/store it..
energy = (double *) malloc(volmemsz);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMemcpy(energy, d_output, volmemsz, cudaMemcpyDeviceToHost);
CUERR // check and clear any existing errors
cudaFree(d_output);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
/* Print a subset of the results to a file */
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
if (writeenergy(parameters->outFile, energy, volsize) == -1)
return -1;
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
free(atoms);
free(energy);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
|
6cc7774d63fb306ff93e6476bf6a4e0861f7acba.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/indexreduce.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <loops/broadcasting_bool.h>
#include <loops/scalar.h>
#include <loops/scalar_bool.h>
#include <loops/pairwise_transform.h>
#include <loops/pairwise_bool.h>
#include <loops/transform_same.h>
#include <loops/transform_float.h>
#include <loops/transform_strict.h>
#include <loops/transform_bool.h>
#include <loops/transform_any.h>
#include <loops/reduce_float.h>
#include <loops/reduce_same.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
//#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
#include <graph/exceptions/datatype_exception.h>
#include <helpers/CudaLaunchHelper.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
//#include <sys/time.h>
// b40c only available for gcc :(
#ifdef __clang__
// do nothing
#elif __GNUC__
#include <b40c/util/error_utils.cuh>
#include <b40c/util/multiple_buffering.cuh>
#include <b40c/radix_sort/enactor.cuh>
#endif
#include <hiprand/hiprand.h>
#include <Status.h>
#include <helpers/DebugHelper.h>
using namespace nd4j;
#include <loops/special_kernels.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
/**
* This is a utility kernel that updates the given special buffer with proper values in device memory
*/
extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) {
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0)
return;
dimension[0] = 0;
maxDimension[0] = 1;
specialPointer[0] = 2;
specialPointer[1] = rows;
specialPointer[2] = 1;
specialPointer[3] = 1;
specialPointer[4] = 1;
specialPointer[5] = 0;
specialPointer[6] = 1;
specialPointer[7] = 99;
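// the eight words above describe a rank-2 array: rank, shape {rows, 1}, strides {1, 1}, the extra-properties word (updated via ArrayOptions::setDataType below), element-wise stride 1 and order 'c' (ASCII 99)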
ArrayOptions::setDataType(specialPointer, dataType);
//printf("special[0]: [%lld]\n", (long long) specialPointer[0]);
//shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer);
}
// this method isn't used, left here for legacy and caution purposes
// TLDR: don't use this way, it sucks
void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){
SyncInfo *sync = reinterpret_cast<SyncInfo *>(data);
//printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jLong n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
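// note: by the convention used throughout this file, the third dim3 component carries the shared-memory byte count, not a z grid dimension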
return dim3(num_blocks,num_threads, 3000);
}
int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
/*
* This method returns the shared memory threshold value. The default overflow ratio is 0.3.
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads to nearest warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this moment we've stored all the required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this moment we know total memory used per block, and we also know per-mp limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the gpu can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
/*
* This method returns kernel launch param for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, hipFuncAttributes funcAttr) {
auto xRank = shape::rank(dXShapeInfo);
auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo);
auto zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
auto xLength = shape::length(dXShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TAD here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param dXShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
Nd4jLong tadLength = 0;
Nd4jLong numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(dXShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768);
numTads = shape::length(dXShapeInfo) / tadLength;
}
auto xRank = shape::rank(dXShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, hipFuncAttributes attributes, hipDeviceProp_t properties) {
auto n = shape::length(hXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) {
Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
void NativeOps::execPairwiseTransform(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 1024, 8192);
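// launchDims is (grid size, block size, shared-memory bytes), following the convention used by the launch helpers in this file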
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransform both operands must have same data type", xType, yType);
if (xType != zType && yType != zType)
throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand to have either X or Y type");
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES)
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES)
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execPairwiseTransformBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType);
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 1024, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execBroadcastBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type");
if (yType != xType)
throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type");
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES)
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
/*
hipEvent_t start;
hipEventCreateWithFlags(&start, hipEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("FF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF8 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,int dimensionLength) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduce(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
void NativeOps::execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// void *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// FIXME: we want Z to be one of integer types
//if (!DataTypeUtils::isZ(zType))
// throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types")
if (zType != nd4j::DataType::INT64)
throw nd4j::datatype_exception::build("NativeOps::execIndexReduceScalar requires Z operand to have INT64 data type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed");
}
void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType)
throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type");
//nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) failed");
}
void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
}
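// "Any" transforms allow arbitrary X/Z type combinations. IsMax is special-cased below:
// it is emulated through IndexMax (scalar or along-dimension) followed by a filler kernel.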
void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
switch (opNum) {
case transform::IsMax: {
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
auto special = reinterpret_cast<double *>(extraPointers[17]);
if (scalarCheat) {
auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64);
/**
* In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
*/
execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr);
Nd4jLong maxIdx = -119;
checkCudaErrors(hipStreamSynchronize(*stream));
hipMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), hipMemcpyDeviceToHost, *stream);
checkCudaErrors(hipStreamSynchronize(*stream));
int targetIdx = 0;
                if (shape::order(hXShapeInfo) == 'c' || (shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1];
dim3 launchDims(1, 512, 1024);
BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
delete[] scalarShape;
} else {
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]);
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
dim3 launchDims(256, 256, 16384);
// at this point, all IMax indexes are gathered, and we execute filler
BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
}
}
break;
default: {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES);
}
}
}
void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType || !DataTypeUtils::isR(xType))
throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType);
switch (opNum) {
case transform::SoftMax:
case transform::SoftMaxDerivative:
case transform::LogSoftMax: {
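            // The SoftMax family is emulated through legacy ops: reduce Max along the row dimension,
            // broadcast Subtract, Exp, reduce Sum, broadcast Divide, plus Log / SpecialDerivative
            // for the LogSoftMax and SoftMaxDerivative variants.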
if (shape::isVector(hXShapeInfo)) {
int length = shape::length(hXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES);
} else {
auto shape = shape::shapeOf(hXShapeInfo);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// special pointer for special buffer for special ops
auto specialPointer = reinterpret_cast<double *>(extraPointers[6]);
auto dimension = reinterpret_cast<int *>(specialPointer);
auto maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4));
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, xType, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0], xType);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformStrict(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// log 3
if (opNum == transform::LogSoftMax)
execTransformStrict(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
else if (opNum == transform::SoftMaxDerivative)
execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) failed");
                delete[] hostMaxShapeBuffer;
}
}
break;
default: {
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES);
}
}
}
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType);
if (opNum == transform::Histogram) {
dim3 launchDims(256, 256, 32768);
Nd4jPointer maskedAllocPointer;
auto length = shape::length(hZShapeInfo);
hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64));
auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
checkCudaErrors(hipStreamSynchronize(*stream));
hipFree(maskedAllocPointer);
} else {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param dZ the dZ array
 * @param dZShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flatten(Nd4jPointer *extraPointers,
int offset,
char order,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hInput, Nd4jLong *hInputShapeInfo,
void *dInput, Nd4jLong *dInputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
// int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo);
BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
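// Probes every device pair with hipDeviceCanAccessPeer and records in supportedP2P
// whether full peer-to-peer access is available across all devices.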
void NativeOps::checkP2P() {
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
hipSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(dY, 0);
} else {
hipDeviceDisablePeerAccess(dY);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
hipSetDevice(curDevice);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->hipblasSgemv = (CublasSgemv)functions[0];
this->hipblasDgemv = (CublasDgemv)functions[1];
this->hipblasHgemm = (CublasHgemm)functions[2];
this->hipblasSgemm = (CublasSgemm)functions[3];
this->hipblasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
hipError_t res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize, hipHostMallocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
hipError_t res = hipFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipStreamCreate(...) failed");
return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer");
hipError_t dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipEventCreateWithFlags(...) failed");
return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t dZ = hipEventRecord(*pEvent, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipEventRecord(...) failed");
return 1;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
hipError_t dZ = hipSetDevice(deviceId);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipSetDevice(...) failed");
return 1;
}
Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
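// Synchronous memcpy is implemented on top of memcpyAsync; the flags argument selects
// the hipMemcpyKind: 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device.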
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
    hipMemcpyKind kind = hipMemcpyDefault; // fall back to runtime-inferred direction if an unknown flag slips through
DEBUG_KERNEL(pStream, 0);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
        case 2: {
            kind = hipMemcpyDeviceToHost;
        }
            break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
break;
}
}
hipError_t dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
if (dZ != 0) {
checkCudaErrors(dZ);
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
throw std::runtime_error("hipMemcpyAsync(...) failed");
//return 0L;
}
return 1;
}
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipError_t dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipMemset(...) failed");
return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipError_t dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipMemsetAsync(...) failed");
return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t dZ = hipEventDestroy(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEvenDestroy(...) failed");
return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t dZ = hipStreamSynchronize(*pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipStreamSynchronize(...) failed");
return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t dZ = hipEventSynchronize(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipEventSynchronize(...) failed");
return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].name;
}
/**
 * Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data, Nd4jPointer *inputShapeInfo,
Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hXShapeInfo = hZShapeInfo;
auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo);
    // numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 8192;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hZShapeInfo) == 2 && shape::order(hZShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0 ||
shape::order(hShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hXShapeInfo) == 'c') {
auto length0 = shape::length(hShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c' || length0 != shape::length(hShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hZShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
dim3 launchDims(128, 128, 16384);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelScalarGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), dZ), LIBND4J_TYPES);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
dim3 launchDims(128, 512, 16384);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelVStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
dim3 launchDims(128, 128, 16384);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelHStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
dim3 launchDims(128, 128, 8192);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets), LIBND4J_TYPES);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
hipError_t res = hipStreamSynchronize(*stream);
checkCudaErrors(res);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
}
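// Host-side concat fallback: delegates to the generic CPU implementation in SpecialMethods.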
void NativeOps::specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
dZ,
dZShapeInfo);
}
/**
 * This method computes TAD-only shape info and offsets for the given dimensions
 * and copies them into the target/offsets buffers
*/
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
shape::TAD tad;
tad.init(dXShapeInfo, dimension, dimensionLength);
//tad->setOutputBuffer(target);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
        case 2: {
            kind = hipMemcpyDeviceToHost;
        }
            break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
}
//hipError_t dZ = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
hipError_t dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipMemcpyToSymbolAsync(...) failed");
return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw std::runtime_error("hipGetSymbolAddress(...) failed");
return dConstAddr;
}
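// Copies the rows selected by `indexes` from dX into dZ, using the provided
// TAD shape info/offsets to address individual rows on both source and target.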
void NativeOps::pullRows(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
Nd4jLong *zTadShapeInfo,
Nd4jLong *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
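// Averages n device buffers into dz. mode (extras[3]) selects the path:
// 0 launches averagingKernelGeneric on the GPU, anything else runs the host fallback.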
void NativeOps::average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
// averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dX, dz, n, length, propagate);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES);
}
}
void NativeOps::accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length) {
auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES);
}
}
void NativeOps::shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = nd4j::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(N, 256, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES);
DEBUG_KERNEL(stream, 0);
}
/*
void NativeOps::execMetaPredicateShape(Nd4jPointer *extras,
const int opTypeA,
const int opNumA,
const int opTypeB,
const int opNumB,
Nd4jLong N,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraA,
void *extraB,
double scalarA,
double scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES);
// functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
*/
bool NativeOps::isExperimentalEnabled() {
return nd4j::Environment::getInstance()->isExperimentalBuild();
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
hipGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,
int dimensionLength,
bool biasCorrected,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execScalarBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type");
if (!DataTypeUtils::isB(zType) )
throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires X & Y to have same type", xType, yType);
if (!DataTypeUtils::isB(zType) )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires Z operand to have BOOL type", nd4j::DataType::BOOL, zType);
BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
if (!Environment::getInstance()->isExperimentalBuild() && Environment::getInstance()->isDebug()) {
auto sX = DataTypeUtils::asString(xType);
auto sY = DataTypeUtils::asString(yType);
auto sZ = DataTypeUtils::asString(zType);
nd4j_printf("Running execScalar with dtypes: [%s], [%s], [%s]\n", sX.c_str(), sY.c_str(), sZ.c_str());
}
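    // Experimental builds allow mixed X/Y/Z type combinations (pairwise selector);
    // otherwise all three operands are dispatched on the single X type.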
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
nd4j::DataType dtype) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}
void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, nd4j::DataType dtype) {
    // batched aggregates are dispatched in a single batched kernel launch
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
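// RNG state lives on the host as nd4j::graph::RandomGenerator; it is copied to a temporary
// device buffer, consumed by the kernel, then copied back so the host state stays in sync.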
void NativeOps::execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
Nd4jPointer stateDevice;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
checkCudaErrors(hipStreamSynchronize(*stream));
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, dZShapeInfo, extraArguments),
BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream));
checkCudaErrors(hipStreamSynchronize(*stream));
hipFree(stateDevice);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
Nd4jPointer stateDevice;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
checkCudaErrors(hipStreamSynchronize(*stream));
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments);
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream));
checkCudaErrors(hipStreamSynchronize(*stream));
hipFree(stateDevice);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
Nd4jPointer stateDevice;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
checkCudaErrors(hipStreamSynchronize(*stream));
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments);
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream));
checkCudaErrors(hipStreamSynchronize(*stream));
hipFree(stateDevice);
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// hipStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
    // FIXME: it's a bad thing, but we can't know in advance which stream(s) were using this generator in practice
hipDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
    // refresh buffer on host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
/**
 * Get the pointer corresponding to a given numeric address
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tear(Nd4jPointer *extras,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong *zShapeInfo,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}
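// Work-efficient parallel prefix sum (scan) over int buffers, recursing over the per-block
// partial sums stored in g_scanBlockSums; used by encodeThresholdP2Int below.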
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (nd4j::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = nd4j::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
}
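// Three-phase threshold encoding: P1 flags/counts elements above the threshold, P2 turns the
// per-block counts into offsets via the prefix scan above, and P3 writes the compressed
// output using those offsets.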
void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}
void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 4096);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}
void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto zType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}
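// Rough shape of the threshold-compression pipeline above (a hedged reading of the code,
// not a spec): encodeThresholdP1 flags/counts elements exceeding the threshold,
// encodeThresholdP2Int turns those per-block counts into offsets via the exclusive scan
// (note it scans dx + 1, element 0 presumably holding a header), encodeThresholdP3 uses
// the offsets to write the compressed stream, and decodeThreshold expands it back.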
void NativeOps::execReduce3All(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParamsVals,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims(shape::length(hZShapeInfo), 256, 32768);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES);
}
}
} else {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES);
n>>=1;
rev = 1;
} while(n > 1);
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed");
}
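/*
 * For orientation, the classic CUDA bitonic compare-exchange step that
 * bitonicSortStepGeneric is assumed to generalize (the real kernel additionally handles
 * shape info, element-wise strides and the descending flag):
 *
 *   __global__ void bitonicStep(float *data, int j, int k, int n) {
 *       unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
 *       unsigned int ixj = i ^ j;                 // partner index for this stage
 *       if (i < n && ixj < n && ixj > i) {
 *           bool ascending = ((i & k) == 0);      // bit k picks the sort direction
 *           if (ascending == (data[i] > data[ixj])) {
 *               float t = data[i]; data[i] = data[ixj]; data[ixj] = t;
 *           }
 *       }
 *   }
 */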
void NativeOps::sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
bool descending) {
// implemented via the oesTadGeneric kernel below
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}
void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers,
void *dx, Nd4jLong *hXShapeInfo,
Nd4jLong N,
int *dz,
float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");
Nd4jLong dZ = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return dZ;
}
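// Note: resultPointer (extraPointers[2]) appears to be a host-visible counter the kernel
// increments with the number of elements it actually encoded; it is read back here and
// reset to zero so the same buffer can be reused by the next call.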
void NativeOps::decodeBitmap(Nd4jPointer *extraPointers,
void *dx,
Nd4jLong N,
void *dz, Nd4jLong *zShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
nd4j::graph::VariableSpace varSpace;
Context block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't copy the buffer if it's an empty array
void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
auto array = new nd4j::NDArray(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.workspace() != nullptr)
shapeList->detach();
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
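/*
 * Hypothetical caller-side sketch (myBuffers/myShapes/tArgs/iArgs are placeholders,
 * not part of this API):
 *
 *   NativeOps ops;
 *   auto shapes = ops.calculateOutputShapes(nullptr, opHash,
 *                                           myBuffers, myShapes, numInputs,
 *                                           tArgs, numTArgs, iArgs, numIArgs);
 *   // ... consume the returned ShapeList ...
 *   ops.deleteShapeList(reinterpret_cast<Nd4jPointer>(shapes));
 */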
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray*> inputs(numInputs);
std::vector<nd4j::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(0);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
inputs[e] = new nd4j::NDArray(buffer, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
auto array = new nd4j::NDArray(buffer, shape);
outputs[e] = array;
// and we want to release shape copy once we're done
array->triggerAllocationFlag(false, true);
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
/*
if (!isInplace) {
if (dZ->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray<T> tmp(buffer, shape);
if (tmp.lengthOf() != dZ->at(e)->lengthOf()) {
nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf());
return ND4J_STATUS_BAD_OUTPUT;
}
tmp.assign(dZ->at(e));
}
} else {
// if op is inplace, our ResultSet holds pointers
dZ->purge();
}
delete dZ;
*/
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
}
int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/Index from the original graph. Values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
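// The returned VariablesSet is owned by the caller; presumably it is released through
// deleteVariablesSet(...) further below once the results have been copied out.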
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void NativeOps::deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSet(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer NativeOps::getGraphState(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState(id);
}
void NativeOps::deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on the operation (e.g. while or if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has id of 0, and its inputs are mapped in the loop below
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// after all of the setup above we should have Graph and Node for the current op
return Status::OK();
}
Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
void NativeOps::deleteResultWrapper(Nd4jPointer ptr) {
// just 0 room for compiler s@!t
auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr);
delete p;
}
int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ);
*/
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: .... ^^^
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//nd4j::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
}
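/*
 * The dispatcher above ultimately reduces to single template instantiations such as
 * this one (sketch; the float -> float16 pair is chosen arbitrarily):
 *
 *   void convertFloatToHalf(Nd4jPointer *extras, void *src, Nd4jLong n, void *dst) {
 *       nd4j::TypeCast::convertGenericCuda<float, float16>(extras, src, n, dst);
 *   }
 */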
Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new nd4j::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<nd4j::utf8string*>(ptr));
}
|
6cc7774d63fb306ff93e6476bf6a4e0861f7acba.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/indexreduce.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <loops/broadcasting_bool.h>
#include <loops/scalar.h>
#include <loops/scalar_bool.h>
#include <loops/pairwise_transform.h>
#include <loops/pairwise_bool.h>
#include <loops/transform_same.h>
#include <loops/transform_float.h>
#include <loops/transform_strict.h>
#include <loops/transform_bool.h>
#include <loops/transform_any.h>
#include <loops/reduce_float.h>
#include <loops/reduce_same.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
//#include <thread>
#include <map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
#include <graph/exceptions/datatype_exception.h>
#include <helpers/CudaLaunchHelper.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
//#include <sys/time.h>
// b40c only available for gcc :(
#ifdef __clang__
// do nothing
#elif __GNUC__
#include <b40c/util/error_utils.cuh>
#include <b40c/util/multiple_buffering.cuh>
#include <b40c/radix_sort/enactor.cuh>
#endif
#include <curand.h>
#include <Status.h>
#include <helpers/DebugHelper.h>
using namespace nd4j;
#include <loops/special_kernels.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
/**
* This is a utility kernel that updates the given special buffer with proper values in device memory
*/
extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) {
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0)
return;
dimension[0] = 0;
maxDimension[0] = 1;
specialPointer[0] = 2;
specialPointer[1] = rows;
specialPointer[2] = 1;
specialPointer[3] = 1;
specialPointer[4] = 1;
specialPointer[5] = 0;
specialPointer[6] = 1;
specialPointer[7] = 99;
ArrayOptions::setDataType(specialPointer, dataType);
//printf("special[0]: [%lld]\n", (long long) specialPointer[0]);
//shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer);
}
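// For readability: the eight values written above follow the usual shapeInfo layout for a
// rank-2, c-ordered [rows, 1] array: rank = 2, shape = {rows, 1}, strides = {1, 1},
// offset = 0, elementWiseStride = 1, order = 99 ('c'), with the data type packed in last.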
// this method isn't used, left here for legacy and caution purposes
// TLDR: don't use this way, it sucks
void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){
SyncInfo *sync = reinterpret_cast<SyncInfo *>(data);
//printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jLong n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
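// Worked example (assuming the occupancy helper suggests at least 512 threads):
// n = 100000 -> num_threads is capped at maxThreads = 512, num_blocks = 100000 / 512 = 195,
// then capped at blockLimit = 128; no further adjustment applies, so the result is
// dim3(128, 512, 3000).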
int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
/*
* This method returns the shared memory threshold value. Default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note threshold isn't multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
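// Example: for cc 6.1 this returns 49152 / 0.3 = 163840, i.e. the raw per-block shared
// memory budget inflated by the overflow ratio mentioned above.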
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads down to the nearest multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this moment we've stored all the required information. Time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this moment we know total memory used per block, and we also know per-mp limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the GPU can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory. So, now we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
/*
* This method returns kernel launch param for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, cudaFuncAttributes funcAttr) {
auto xRank = shape::rank(dXShapeInfo);
auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo);
auto zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
auto xLength = shape::length(dXShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TAD here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory. So, now we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param dXShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
Nd4jLong tadLength = 0;
Nd4jLong numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(dXShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have a special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768);
numTads = shape::length(dXShapeInfo) / tadLength;
}
auto xRank = shape::rank(dXShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, cudaFuncAttributes attributes, cudaDeviceProp properties) {
auto n = shape::length(hXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) {
Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the device pointer to the scalar result buffer
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the device pointer to the dimension buffer (initialized to MAX_DIMENSION)
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
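/*
 * Hypothetical usage sketch for ScalarInfo (the kernel launch itself is elided):
 *
 *   cudaStream_t stream;
 *   cudaStreamCreate(&stream);
 *   ScalarInfo<float> scalar(stream);
 *   // ... launch a reduction that writes its result into scalar.getDevicePointer(),
 *   //     using scalar.getDeviceShapeInfo() as the scalar output shape ...
 *   float value = scalar.getFinalResultFromDevice();   // copies the buffer back and reads element 0
 *   cudaStreamDestroy(stream);
 */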
void NativeOps::execPairwiseTransform(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 1024, 8192);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransform both operands must have same data type", xType, yType);
if (xType != zType && yType != zType)
throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand to have either X or Y type");
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES)
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES)
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execPairwiseTransformBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType);
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 1024, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execBroadcastBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type");
if (yType != xType)
throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type");
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES)
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
/*
cudaEvent_t start;
cudaEventCreateWithFlags(&start, cudaEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("FF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF8 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,int dimensionLength) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduce(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
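// one block per output element, i.e. per TAD along the reduced dimensions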
dim3 launchDims(numBlocks, 256, 32768);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
void NativeOps::execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// void *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// FIXME: we want Z to be one of integer types
//if (!DataTypeUtils::isZ(zType))
// throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types")
if (zType != nd4j::DataType::INT64)
throw nd4j::datatype_exception::build("NativeOps::execIndexReduceScalar requires Z operand to have INT64 data type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed");
}
void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType)
throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type");
//nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) failed");
}
void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
}
void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
switch (opNum) {
case transform::IsMax: {
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
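// extraPointers[17] is a small device scratch buffer that receives the IndexMax result(s)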
auto special = reinterpret_cast<double *>(extraPointers[17]);
if (scalarCheat) {
auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64);
/**
* In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
*/
execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr);
Nd4jLong maxIdx = -119;
checkCudaErrors(cudaStreamSynchronize(*stream));
cudaMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), cudaMemcpyDeviceToHost, *stream);
checkCudaErrors(cudaStreamSynchronize(*stream));
int targetIdx = 0;
if (shape::order(hXShapeInfo) == 'c' || (shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1];
dim3 launchDims(1, 512, 1024);
BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
delete[] scalarShape;
} else {
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]);
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
dim3 launchDims(256, 256, 16384);
// at this point, all IMax indexes are gathered, and we execute filler
BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
}
}
break;
default: {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES);
}
}
}
void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType || !DataTypeUtils::isR(xType))
throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType);
switch (opNum) {
case transform::SoftMax:
case transform::SoftMaxDerivative:
case transform::LogSoftMax: {
if (shape::isVector(hXShapeInfo)) {
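// vector input: a single block processes the whole vector, with shared memory enlarged for the intermediate values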
int length = shape::length(hXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES);
} else {
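// non-vector input: softmax is assembled from legacy primitives:
// per-row max -> broadcast subtract -> exp -> per-row sum -> broadcast divide, then log / special derivative for LogSoftMax / SoftMaxDerivative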
auto shape = shape::shapeOf(hXShapeInfo);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// special pointer for special buffer for special ops
auto specialPointer = reinterpret_cast<double *>(extraPointers[6]);
auto dimension = reinterpret_cast<int *>(specialPointer);
auto maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4));
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, xType, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer<<<1, 1, 128, *stream>>>(dimension, maxDimension, maxShapeBuffer, shape[0], xType);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformStrict(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// log 3
if (opNum == transform::LogSoftMax)
execTransformStrict(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
else if (opNum == transform::SoftMaxDerivative)
execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) failed");
delete[] hostMaxShapeBuffer;
}
}
break;
default: {
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES);
}
}
}
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType);
if (opNum == transform::Histogram) {
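// Histogram needs a temporary device buffer (output length * grid size int64 slots), freed once the stream has synchronized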
dim3 launchDims(256, 256, 32768);
Nd4jPointer maskedAllocPointer;
auto length = shape::length(hZShapeInfo);
cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64));
auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
checkCudaErrors(cudaStreamSynchronize(*stream));
cudaFree(maskedAllocPointer);
} else {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param dZ the destination (flattened) array
* @param dZShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flatten(Nd4jPointer *extraPointers,
int offset,
char order,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hInput, Nd4jLong *hInputShapeInfo,
void *dInput, Nd4jLong *dInputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
// int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo);
BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::checkP2P() {
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
cudaSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(dY, 0);
} else {
cudaDeviceDisablePeerAccess(dY);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
cudaSetDevice(curDevice);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->cublasSgemv = (CublasSgemv)functions[0];
this->cublasDgemv = (CublasDgemv)functions[1];
this->cublasHgemm = (CublasHgemm)functions[2];
this->cublasSgemm = (CublasSgemm)functions[3];
this->cublasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param memorySize memory size, in bytes
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0 on failure
*/
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
cudaError_t res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize, cudaHostAllocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0 on failure
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
cudaError_t res = cudaFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaStreamCreate(...) failed");
return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer");
cudaError_t dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEventCreateWithFlags(...) failed");
return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t dZ = cudaEventRecord(*pEvent, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEventRecord(...) failed");
return 1;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
cudaError_t dZ = cudaSetDevice(deviceId);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaSetDevice(...) failed");
return 1;
}
Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, 0);
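// flags encode the copy direction: 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device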
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
// assumption: fall back to cudaMemcpyDefault so `kind` is never used uninitialized
kind = cudaMemcpyDefault;
break;
}
}
cudaError_t dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
if (dZ != 0) {
checkCudaErrors(dZ);
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
throw std::runtime_error("cudaMemcpyAsync(...) failed");
//return 0L;
}
return 1;
}
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaError_t dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaMemset(...) failed");
return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaError_t dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaMemsetAsync(...) failed");
return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t dZ = cudaEventDestroy(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEvenDestroy(...) failed");
return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t dZ = cudaStreamSynchronize(*pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaStreamSynchronize(...) failed");
return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t dZ = cudaEventSynchronize(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEventSynchronize(...) failed");
return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].name;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data, Nd4jPointer *inputShapeInfo,
Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hXShapeInfo = hZShapeInfo;
auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo);
// numArrays will be used as number of TADs, so each block processes 1 input
int smem = 8192;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hZShapeInfo) == 2 && shape::order(hZShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0 ||
shape::order(hShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hXShapeInfo) == 'c') {
auto length0 = shape::length(hShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c' || length0 != shape::length(hShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hZShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
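// dispatch: scalar concat, VStack (dim 0, 'c'-ordered vectors), HStack (dim 1 into a vector), or the generic TAD-based kernel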
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
dim3 launchDims(128, 128, 16384);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelScalarGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), dZ), LIBND4J_TYPES);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
dim3 launchDims(128, 512, 16384);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelVStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
dim3 launchDims(128, 128, 16384);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelHStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
dim3 launchDims(128, 128, 8192);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_SINGLE_SELECTOR(zType, concatKernelGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets), LIBND4J_TYPES);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
cudaError_t res = cudaStreamSynchronize(*stream);
checkCudaErrors(res);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
}
void NativeOps::specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
dZ,
dZShapeInfo);
}
/**
* This method computes the TAD-only shape info and offsets for the given dimensions
* and copies them into the pre-allocated target / offsets buffers
*/
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
shape::TAD tad;
tad.init(dXShapeInfo, dimension, dimensionLength);
//tad->setOutputBuffer(target);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
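// same direction encoding as memcpyAsync: 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device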
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
}
//cudaError_t dZ = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
cudaError_t dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaMemcpyToSymbolAsync(...) failed");
return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw std::runtime_error("cudaGetSymbolAddress(...) failed");
return dConstAddr;
}
void NativeOps::pullRows(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
Nd4jLong *zTadShapeInfo,
Nd4jLong *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
// averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dX, dz, n, length, propagate);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES);
}
}
void NativeOps::accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES);
}
}
void NativeOps::shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = nd4j::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(N, 256, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES);
DEBUG_KERNEL(stream, 0);
}
/*
void NativeOps::execMetaPredicateShape(Nd4jPointer *extras,
const int opTypeA,
const int opNumA,
const int opTypeB,
const int opNumB,
Nd4jLong N,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraA,
void *extraB,
double scalarA,
double scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES);
// functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
*/
bool NativeOps::isExperimentalEnabled() {
return nd4j::Environment::getInstance()->isExperimentalBuild();
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
cudaGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,
int dimensionLength,
bool biasCorrected,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execScalarBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type");
if (!DataTypeUtils::isB(zType) )
throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires X & Y to have same type", xType, yType);
if (!DataTypeUtils::isB(zType) )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires Z operand to have BOOL type", nd4j::DataType::BOOL, zType);
BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
if (!Environment::getInstance()->isExperimentalBuild() && Environment::getInstance()->isDebug()) {
auto sX = DataTypeUtils::asString(xType);
auto sY = DataTypeUtils::asString(yType);
auto sZ = DataTypeUtils::asString(zType);
nd4j_printf("Running execScalar with dtypes: [%s], [%s], [%s]\n", sX.c_str(), sY.c_str(), sZ.c_str());
}
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
nd4j::DataType dtype) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}
void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, nd4j::DataType dtype) {
// launch configuration for the batched aggregate is carried in extraPointers[2..4]
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
Nd4jPointer stateDevice;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
checkCudaErrors(cudaStreamSynchronize(*stream));
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, dZShapeInfo, extraArguments),
BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream));
checkCudaErrors(cudaStreamSynchronize(*stream));
cudaFree(stateDevice);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
Nd4jPointer stateDevice;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
checkCudaErrors(cudaStreamSynchronize(*stream));
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments);
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream));
checkCudaErrors(cudaStreamSynchronize(*stream));
cudaFree(stateDevice);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
Nd4jPointer stateDevice;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
checkCudaErrors(cudaStreamSynchronize(*stream));
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments);
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream));
checkCudaErrors(cudaStreamSynchronize(*stream));
cudaFree(stateDevice);
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// cudaStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
// FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
cudaDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh buffer on host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
 * @return the number of Nd4jLong elements in the shape buffer
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
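// For illustration, assuming the usual 2 * rank + 4 layout of shape::shapeInfoLength: a rank-2
// shape buffer would give a length of 8 Nd4jLong values (the shape and stride entries plus the
// header/trailer words such as rank, element-wise stride and order).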
/**
 * Convert a numeric address into a pointer
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tear(Nd4jPointer *extras,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong *zShapeInfo,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (nd4j::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = nd4j::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
}
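// Worked example of the sizing above: with numElements = 1000 and blockSize = 512,
// numBlocks = ceil(1000 / 1024) = 1; 1000 is not a power of two, so numThreads = floorPow2(1000) = 512
// and numEltsPerBlock = 1024; the single (non-full) last block then covers numEltsLastBlock = 1000
// elements with numThreadsLastBlock = floorPow2(1000) = 512 threads and bank-conflict padding in shared memory.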
void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}
void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 4096);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}
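// Roughly, the three encodeThreshold passes form one pipeline: P1 scans the input against the
// threshold, the prefix scan driven by P2 turns the per-block results into write offsets, and P3
// emits the compressed output using those offsets; decodeThreshold below reverses the encoding.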
void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto zType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}
void NativeOps::execReduce3All(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParamsVals,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims(shape::length(hZShapeInfo), 256, 32768);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[ 1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES);
}
}
} else {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES);
n>>=1;
rev = 1;
} while(n > 1);
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed");
}
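// For a power-of-two length such as xLength = 8, the bitonic branch runs k = 2, 4, 8 with
// j = k/2, ..., 1 inside each k (the classic bitonic sorting network); for other lengths the
// arbitrary-step branch grows a merge window until it covers the whole array.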
void NativeOps::sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
bool descending) {
// sorts each TAD independently using an odd-even transposition sort (oesTadGeneric)
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}
void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers,
void *dx, Nd4jLong *hXShapeInfo,
Nd4jLong N,
int *dz,
float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");
Nd4jLong dZ = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return dZ;
}
void NativeOps::decodeBitmap(Nd4jPointer *extraPointers,
void *dx,
Nd4jLong N,
void *dz, Nd4jLong *zShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
nd4j::graph::VariableSpace varSpace;
Context block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't pass a buffer if that's an empty array
void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
auto array = new nd4j::NDArray(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.workspace() != nullptr)
shapeList->detach();
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray*> inputs(numInputs);
std::vector<nd4j::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(0);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
inputs[e] = new nd4j::NDArray(buffer, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
auto array = new nd4j::NDArray(buffer, shape);
outputs[e] = array;
// and we want to release shape copy once we're done
array->triggerAllocationFlag(false, true);
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
/*
if (!isInplace) {
if (dZ->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray<T> tmp(buffer, shape);
if (tmp.lengthOf() != dZ->at(e)->lengthOf()) {
nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf());
return ND4J_STATUS_BAD_OUTPUT;
}
tmp.assign(dZ->at(e));
}
} else {
// if op is inplace, our ResultSet holds pointers
dZ->purge();
}
delete dZ;
*/
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
}
int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/Index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void NativeOps::deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSet(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer NativeOps::getGraphState(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState(id);
}
void NativeOps::deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
 * depending on the operation (i.e. while or if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has an id of 0, and its inputs are mapped from the provided buffers below
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point we should have the Graph and Node prepared for the current op
return Status::OK();
}
Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
void NativeOps::deleteResultWrapper(Nd4jPointer ptr) {
// nothing else to clean up here, just release the wrapper
auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr);
delete p;
}
int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ);
*/
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: .... ^^^
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//nd4j::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
}
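/**
 * Usage sketch for convertTypes, assuming dSrc/dDst are device buffers of N elements already
 * allocated by the caller and extras carries the stream at index 1 as elsewhere in this file
 * (illustrative only, not a prescribed call sequence):
 *
 *   NativeOps ops;
 *   ops.convertTypes(extras, ND4J_FLOAT32, reinterpret_cast<Nd4jPointer>(dSrc),
 *                    N, ND4J_DOUBLE, reinterpret_cast<Nd4jPointer>(dDst));
 */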
Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new nd4j::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<nd4j::utf8string*>(ptr));
}
|
e15f23ebeee78de232ac1384311e267cd6997b4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include "hip/hip_vector_types.h"
extern "C" {
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
while(thid < listSize){
int value = list[thid] + elements[i];
list[thid+listSize] = value;
thid += blockDim.x * gridDim.x;
}
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadCounts = 1 << (j-1);
while (tid < threadCounts){
int medianId = tid + threadCounts;
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1;
while(tid <= threads){
int medianId = tid + threads - 1;
int4 localFetch = H[medianId];
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // we may read out of bounds if chunkA or chunkB does not divide the list evenly across k
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // we may read out of bounds here as well
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
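// Note: cuPrune appears to assume listA is sorted increasingly and listB decreasingly (cf. cuReverse):
// x is then the smallest and y the largest sum inside the (tid, j) block pair, so only pairs with
// x < M < y can still contain a pair summing to M and are recorded for the fine-grained cuSearch pass.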
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
int a, b;
a = b = 0;
while(a < chunkSizeA && b < chunkSizeB){
int value = listA[a] + listB[b];
if(value == M) return true;
if(value < M) a++;
else b++;
}
return false;
}
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
__global__ void cuReverse(int* tab, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= size/2)
return;
int tmp = tab[tid];
tab[tid] = tab[size-tid-1];
tab[size-tid-1] = tmp;
}
__device__ int binsearchInc(int* tab, int l, int r, int value){
while(l < r){
int m = (l + r) / 2;
if(tab[m] >= value){
r = m;
} else{
l = m+1;
}
}
return l;
}
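// binsearchInc is a lower-bound search on an increasing range [l, r): e.g. for tab = {1, 3, 3, 7} and
// value = 3 it returns index 1 (the first element >= value), and for value = 8 it returns r.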
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
if(b-a > d-c){
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
int position = beginA + beginB;
while(beginA < endA && beginB < endB){
if (listA[beginA] < listB[beginB]){
result[position++] = listA[beginA++];
} else{
result[position++] = listB[beginB++];
}
}
while(beginA < endA){
result[position++] = listA[beginA++];
}
while(beginB < endB){
result[position++] = listB[beginB++];
}
}
}
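/*
 * Host-side launch sketch for cuAdd, assuming d_list and d_elements are hypothetical device buffers
 * prepared with hipMalloc/hipMemcpy (kept in a comment so the translation unit is unchanged; it only
 * illustrates the hipLaunchKernelGGL convention used elsewhere in this corpus):
 *
 *   int threads = 256;
 *   int blocks  = (listSize + threads - 1) / threads;
 *   hipLaunchKernelGGL(cuAdd, dim3(blocks), dim3(threads), 0, 0,
 *                      d_list, d_elements, i, listSize);
 *   hipDeviceSynchronize();
 */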
|
e15f23ebeee78de232ac1384311e267cd6997b4a.cu
|
#include<cstdio>
#include "vector_types.h"
extern "C" {
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
while(thid < listSize){
int value = list[thid] + elements[i];
list[thid+listSize] = value;
thid += blockDim.x * gridDim.x;
}
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadCounts = 1 << (j-1);
while (tid < threadCounts){
int medianId = tid + threadCounts;
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1;
while(tid <= threads){
int medianId = tid + threads - 1;
int4 localFetch = H[medianId];
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // we may read out of bounds if chunkA or chunkB does not divide the list evenly across k
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // we may read out of bounds here as well
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
int a, b;
a = b = 0;
while(a < chunkSizeA && b < chunkSizeB){
int value = listA[a] + listB[b];
if(value == M) return true;
if(value < M) a++;
else b++;
}
return false;
}
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
__global__ void cuReverse(int* tab, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= size/2)
return;
int tmp = tab[tid];
tab[tid] = tab[size-tid-1];
tab[size-tid-1] = tmp;
}
__device__ int binsearchInc(int* tab, int l, int r, int value){
while(l < r){
int m = (l + r) / 2;
if(tab[m] >= value){
r = m;
} else{
l = m+1;
}
}
return l;
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
if(b-a > d-c){
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
int position = beginA + beginB;
while(beginA < endA && beginB < endB){
if (listA[beginA] < listB[beginB]){
result[position++] = listA[beginA++];
} else{
result[position++] = listB[beginB++];
}
}
while(beginA < endA){
result[position++] = listA[beginA++];
}
while(beginB < endB){
result[position++] = listB[beginB++];
}
}
}
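/*
 * Host-side launch sketch for cuAdd, assuming d_list and d_elements are hypothetical device buffers
 * prepared with cudaMalloc/cudaMemcpy (illustrative only):
 *
 *   int threads = 256;
 *   int blocks  = (listSize + threads - 1) / threads;
 *   cuAdd<<<blocks, threads>>>(d_list, d_elements, i, listSize);
 *   cudaDeviceSynchronize();
 */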
|
53cb62e6490a0d993a047c53b49fbe61156eb9ac.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2010-2015, Raymond Tay, Singapore
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <pearsoncoefficient_kernel.cu>
#include "reduction_kernel.cu"
#include "reduction.h"
extern "C"
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
template<class T> T reduceCPU(T*data, int size);
extern "C"
void computeGold( float* reference, float* idata, const unsigned blocks, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
// unsigned int N = 1 << 24; // 1 << 24 will produce a NaN on GT330M Cuda Toolkit 3.2
unsigned int N = 1 << 8;
unsigned int num_of_threads = 512;
unsigned int mem_size = sizeof( float) * N;
// allocate host memory to represent vectors v1 & v2
float* h_idata = (float*) malloc( mem_size);
float* h_jdata = (float*) malloc( mem_size);
float* h_powidata = (float*) malloc( mem_size);
float* h_powjdata = (float*) malloc( mem_size);
float* h_aggdata = (float*) malloc( mem_size);
// initialize the memory
float t;
for( unsigned int i = 0; i < N; ++i)
{
t = pow((float)i, 2); // t holds i*i for the squared-value vectors
h_idata[i] = (float) i;
h_jdata[i] = (float) i;
h_powidata[i] = (float) t;
h_powjdata[i] = (float) t;
h_aggdata[i] = (float)(h_idata[i]*h_jdata[i]);
}
// allocate device memory to represent vectors v1 & v2
float* d_idata;
float* d_jdata;
float* d_powidata;
float* d_powjdata;
float* d_aggdata;
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_jdata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_powidata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_powjdata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_aggdata, mem_size));
// copy host memory to device
cutilSafeCall( hipMemcpy( d_idata, h_idata, mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_jdata, h_jdata, mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_powidata, h_powidata, mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_powjdata, h_powjdata, mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_aggdata, h_aggdata, mem_size, hipMemcpyHostToDevice) );
// allocate device memory for result
float* d_odata;
float* d_o2data;
float* d_powodata;
float* d_powo2data;
float* d_aggodata;
unsigned int blocks = N%num_of_threads == 0? N/num_of_threads: 1+N/num_of_threads;
cutilSafeCall( hipMalloc( (void**) &d_odata, sizeof(float)*blocks ));
cutilSafeCall( hipMalloc( (void**) &d_o2data, sizeof(float)*blocks ));
cutilSafeCall( hipMalloc( (void**) &d_powodata, sizeof(float)*blocks ));
cutilSafeCall( hipMalloc( (void**) &d_powo2data, sizeof(float)*blocks ));
cutilSafeCall( hipMalloc( (void**) &d_aggodata, sizeof(float)*blocks ));
// Conduct parallel sum reduction
reduce<float>(N, num_of_threads, blocks, 6, d_idata, d_odata);
reduce<float>(N, num_of_threads, blocks, 6, d_jdata, d_o2data);
reduce<float>(N, num_of_threads, blocks, 6, d_powidata, d_powodata);
reduce<float>(N, num_of_threads, blocks, 6, d_powjdata, d_powo2data);
reduce<float>(N, num_of_threads, blocks, 6, d_aggdata, d_aggodata);
// allocate mem for the result on host side
float* h_odata = (float*) malloc( sizeof(float)*blocks );
float* h_o2data = (float*) malloc( sizeof(float)*blocks );
float* h_powodata = (float*) malloc( sizeof(float)*blocks );
float* h_powo2data = (float*) malloc( sizeof(float)*blocks );
float* h_aggodata = (float*) malloc( sizeof(float)*blocks );
// copy result from device to host
cutilSafeCallNoSync( hipMemcpy( h_odata, d_odata, sizeof(float)*blocks , hipMemcpyDeviceToHost) );
cutilSafeCallNoSync( hipMemcpy( h_o2data, d_o2data, sizeof(float)*blocks , hipMemcpyDeviceToHost) );
cutilSafeCallNoSync( hipMemcpy( h_powodata, d_powodata, sizeof(float)*blocks , hipMemcpyDeviceToHost) );
cutilSafeCallNoSync( hipMemcpy( h_powo2data, d_powo2data, sizeof(float)*blocks , hipMemcpyDeviceToHost) );
cutilSafeCallNoSync( hipMemcpy( h_aggodata, d_aggodata, sizeof(float)*blocks , hipMemcpyDeviceToHost) );
// compute reference soln
// computeGold(h_odata, h_idata, blocks, N);
// computeGold(h_odata, h_jdata, blocks, N);
// computeGold(h_powodata, h_powidata, blocks, N);
// computeGold(h_powodata, h_powjdata, blocks, N);
cutilCheckError( cutStopTimer( timer));
printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer));
cutilCheckError( cutDeleteTimer( timer));
float sum1 = reduceCPU<float>(h_odata, blocks);
float sum2 = reduceCPU<float>(h_o2data, blocks);
float sum1sq = reduceCPU<float>(h_powodata, blocks);
float sum2sq = reduceCPU<float>(h_powo2data, blocks);
float psum = reduceCPU<float>(h_aggodata, blocks);
float num = psum - (sum1*sum2)/N;
float den = sqrt((sum1sq-pow(sum1,2)/N)*(sum2sq-pow(sum2,2)/N));
if (den == 0)
printf("Den is zero\n");
else
printf("Den is %f\n", 1.0 - num/den);
// cleanup memory
free( h_idata);
free( h_jdata);
free( h_odata);
free( h_o2data);
free( h_powidata);
free( h_powjdata);
free( h_powodata);
free( h_powo2data);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
cutilSafeCall(hipFree(d_jdata));
cutilSafeCall(hipFree(d_o2data));
cutilSafeCall(hipFree(d_powidata));
cutilSafeCall(hipFree(d_powodata));
cutilSafeCall(hipFree(d_powjdata));
cutilSafeCall(hipFree(d_powo2data));
hipDeviceReset();
}
|
53cb62e6490a0d993a047c53b49fbe61156eb9ac.cu
|
// Copyright (c) 2010-2015, Raymond Tay, Singapore
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <pearsoncoefficient_kernel.cu>
#include "reduction_kernel.cu"
#include "reduction.h"
extern "C"
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
template<class T> T reduceCPU(T*data, int size);
extern "C"
void computeGold( float* reference, float* idata, const unsigned blocks, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
// unsigned int N = 1 << 24; // 1 << 24 will produce a NaN on GT330M Cuda Toolkit 3.2
unsigned int N = 1 << 8;
unsigned int num_of_threads = 512;
unsigned int mem_size = sizeof( float) * N;
// allocate host memory to represent vectors v1 & v2
float* h_idata = (float*) malloc( mem_size);
float* h_jdata = (float*) malloc( mem_size);
float* h_powidata = (float*) malloc( mem_size);
float* h_powjdata = (float*) malloc( mem_size);
float* h_aggdata = (float*) malloc( mem_size);
// initialize the memory
for( unsigned int i = 0; i < N; ++i)
{
float t = pow((float)i, 2);
h_idata[i] = (float) i;
h_jdata[i] = (float) i;
h_powidata[i] = (float) t;
h_powjdata[i] = (float) t;
h_aggdata[i] = (float)(h_idata[i]*h_jdata[i]);
}
// allocate device memory to represent vectors v1 & v2
float* d_idata;
float* d_jdata;
float* d_powidata;
float* d_powjdata;
float* d_aggdata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_jdata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_powidata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_powjdata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_aggdata, mem_size));
// copy host memory to device
cutilSafeCall( cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_jdata, h_jdata, mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_powidata, h_powidata, mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_powjdata, h_powjdata, mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_aggdata, h_aggdata, mem_size, cudaMemcpyHostToDevice) );
// allocate device memory for result
float* d_odata;
float* d_o2data;
float* d_powodata;
float* d_powo2data;
float* d_aggodata;
unsigned int blocks = N%num_of_threads == 0? N/num_of_threads: 1+N/num_of_threads;
cutilSafeCall( cudaMalloc( (void**) &d_odata, sizeof(float)*blocks ));
cutilSafeCall( cudaMalloc( (void**) &d_o2data, sizeof(float)*blocks ));
cutilSafeCall( cudaMalloc( (void**) &d_powodata, sizeof(float)*blocks ));
cutilSafeCall( cudaMalloc( (void**) &d_powo2data, sizeof(float)*blocks ));
cutilSafeCall( cudaMalloc( (void**) &d_aggodata, sizeof(float)*blocks ));
// Conduct parallel sum reduction
reduce<float>(N, num_of_threads, blocks, 6, d_idata, d_odata);
reduce<float>(N, num_of_threads, blocks, 6, d_jdata, d_o2data);
reduce<float>(N, num_of_threads, blocks, 6, d_powidata, d_powodata);
reduce<float>(N, num_of_threads, blocks, 6, d_powjdata, d_powo2data);
reduce<float>(N, num_of_threads, blocks, 6, d_aggdata, d_aggodata);
// allocate mem for the result on host side
float* h_odata = (float*) malloc( sizeof(float)*blocks );
float* h_o2data = (float*) malloc( sizeof(float)*blocks );
float* h_powodata = (float*) malloc( sizeof(float)*blocks );
float* h_powo2data = (float*) malloc( sizeof(float)*blocks );
float* h_aggodata = (float*) malloc( sizeof(float)*blocks );
// copy result from device to host
cutilSafeCallNoSync( cudaMemcpy( h_odata, d_odata, sizeof(float)*blocks , cudaMemcpyDeviceToHost) );
cutilSafeCallNoSync( cudaMemcpy( h_o2data, d_o2data, sizeof(float)*blocks , cudaMemcpyDeviceToHost) );
cutilSafeCallNoSync( cudaMemcpy( h_powodata, d_powodata, sizeof(float)*blocks , cudaMemcpyDeviceToHost) );
cutilSafeCallNoSync( cudaMemcpy( h_powo2data, d_powo2data, sizeof(float)*blocks , cudaMemcpyDeviceToHost) );
cutilSafeCallNoSync( cudaMemcpy( h_aggodata, d_aggodata, sizeof(float)*blocks , cudaMemcpyDeviceToHost) );
// compute reference soln
// computeGold(h_odata, h_idata, blocks, N);
// computeGold(h_odata, h_jdata, blocks, N);
// computeGold(h_powodata, h_powidata, blocks, N);
// computeGold(h_powodata, h_powjdata, blocks, N);
cutilCheckError( cutStopTimer( timer));
printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer));
cutilCheckError( cutDeleteTimer( timer));
float sum1 = reduceCPU<float>(h_odata, blocks);
float sum2 = reduceCPU<float>(h_o2data, blocks);
float sum1sq = reduceCPU<float>(h_powodata, blocks);
float sum2sq = reduceCPU<float>(h_powo2data, blocks);
float psum = reduceCPU<float>(h_aggodata, blocks);
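// Assemble the Pearson correlation coefficient r from the reduced partial sums:
//   r = (sum(xy) - sum(x)*sum(y)/N) / sqrt((sum(x^2) - sum(x)^2/N) * (sum(y^2) - sum(y)^2/N))
// num/den below is r itself; the final printf reports 1 - r.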
float num = psum - (sum1*sum2)/N;
float den = sqrt((sum1sq-pow(sum1,2)/N)*(sum2sq-pow(sum2,2)/N));
if (den == 0)
printf("Den is zero\n");
else
printf("Den is %f\n", 1.0 - num/den);
// cleanup memory
free( h_idata);
free( h_jdata);
free( h_odata);
free( h_o2data);
free( h_powidata);
free( h_powjdata);
free( h_powodata);
free( h_powo2data);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
cutilSafeCall(cudaFree(d_jdata));
cutilSafeCall(cudaFree(d_o2data));
cutilSafeCall(cudaFree(d_powidata));
cutilSafeCall(cudaFree(d_powodata));
cutilSafeCall(cudaFree(d_powjdata));
cutilSafeCall(cudaFree(d_powo2data));
cudaThreadExit();
}
|
003de3ca150f92cd27f1c3509310ff31ccb1d4a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "CosSimOp.h"
namespace paddle {
template<int block_size>
__global__ void KeCosSim(real* output,
const real* input1,
const real* input2,
int width,
int input1_height,
int input2_height,
real scale) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ real xx[block_size];
__shared__ real yy[block_size];
__shared__ real xy[block_size];
xx[tid] = 0.0;
yy[tid] = 0.0;
xy[tid] = 0.0;
__syncthreads();
input1 += ty * width;
if (input2_height > 1) {
input2 += ty * width;
}
for (int index = tid; index < width; index += block_size) {
real x = input1[index];
real y = input2[index];
xx[tid] += x * x;
yy[tid] += y * y;
xy[tid] += x * y;
}
__syncthreads();
for (int s = block_size / 2; s > 0; s >>= 1) {
if (tid < s) {
xx[tid] += xx[tid + s];
yy[tid] += yy[tid + s];
xy[tid] += xy[tid + s];
}
__syncthreads();
}
if (tid == 0) {
output[ty] = scale * xy[0] / (sqrt(xx[0]) * sqrt(yy[0]));
}
}
void hlCossim(real* output,
const real* input1,
const real* input2,
size_t width,
size_t input1_height,
size_t input2_height,
real scale) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(input1);
CHECK_NOTNULL(input2);
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, input1_height);
hipLaunchKernelGGL(( KeCosSim<block_size>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
output, input1, input2, width, input1_height, input2_height, scale);
CHECK_SYNC("hlCossim failed");
}
template <>
void CosSimForward<DEVICE_TYPE_GPU>(GpuMatrix& out_mat,
const GpuMatrix& in1_mat,
const GpuMatrix& in2_mat,
real scale) {
CHECK(out_mat.getData() && in1_mat.getData() && in2_mat.getData());
CHECK(in1_mat.useGpu_ == true && in2_mat.useGpu_ == true)
<< "Matrix type are not GPU";
size_t dim = in1_mat.getWidth();
real* out = out_mat.getData();
const real* x = in1_mat.getData();
const real* y = in2_mat.getData();
hlCossim(out, x, y, dim, in1_mat.getHeight(), in2_mat.getHeight(), scale);
}
template<int block_size>
__global__ void KeCosSimDerivative(const real* grad,
const real* output,
const real* prev_out_x,
const real* prev_out_y,
real* prev_grad_x,
real* prev_grad_y,
size_t width,
size_t input1_height,
size_t input2_height,
real scale) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ real xx[block_size];
__shared__ real yy[block_size];
__shared__ real xy[block_size];
xx[tid] = 0.0;
yy[tid] = 0.0;
xy[tid] = 0.0;
__syncthreads();
prev_out_x += ty * width;
prev_grad_x += ty * width;
if (input2_height > 1) {
prev_out_y += ty * width;
prev_grad_y += ty * width;
}
for (int index = tid; index < width; index += block_size) {
real x = prev_out_x[index];
real y = prev_out_y[index];
xx[tid] += x * x;
yy[tid] += y * y;
xy[tid] += x * y;
}
__syncthreads();
for (int s = block_size / 2; s > 0; s >>= 1) {
if (tid < s) {
xx[tid] += xx[tid + s];
yy[tid] += yy[tid + s];
xy[tid] += xy[tid + s];
}
__syncthreads();
}
if (xy[0] == 0) {
real reciprocal = 1.0 / (sqrt(xx[0]) * sqrt(yy[0]));
for (int index = tid; index < width; index += block_size) {
prev_grad_x[index] +=
scale * grad[ty] * prev_out_y[index] * reciprocal;
if (input2_height > 1) {
prev_grad_y[index] +=
scale * grad[ty] * prev_out_x[index] * reciprocal;
} else {
paddle::paddleAtomicAdd(prev_grad_y + index,
scale * grad[ty] * prev_out_x[index] * reciprocal);
}
}
} else {
real reciprocalXY = 1.0 / xy[0];
real reciprocalSquareSumX = 1.0 / xx[0];
real reciprocalSquareSumY = 1.0 / yy[0];
for (int index = tid; index < width; index += block_size) {
prev_grad_x[index] += output[ty] * grad[ty] *
(prev_out_y[index] * reciprocalXY -
prev_out_x[index] * reciprocalSquareSumX);
if (input2_height > 1) {
prev_grad_y[index] += output[ty] * grad[ty] *
(prev_out_x[index] * reciprocalXY -
prev_out_y[index] * reciprocalSquareSumY);
} else {
paddle::paddleAtomicAdd(prev_grad_y + index, output[ty] * grad[ty] *
(prev_out_x[index] * reciprocalXY -
prev_out_y[index] * reciprocalSquareSumY));
}
}
}
}
void hlCossimDerivative(const real* grad,
const real* output,
const real* prev_out_x,
const real* prev_out_y,
real* prev_grad_x,
real* prev_grad_y,
size_t width,
size_t input1_height,
size_t input2_height,
real scale) {
CHECK_NOTNULL(grad);
CHECK_NOTNULL(output);
CHECK_NOTNULL(prev_out_x);
CHECK_NOTNULL(prev_out_y);
CHECK_NOTNULL(prev_grad_x);
CHECK_NOTNULL(prev_grad_y);
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, input1_height);
hipLaunchKernelGGL(( KeCosSimDerivative<block_size>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
grad, output, prev_out_x, prev_out_y, prev_grad_x, prev_grad_y, width,
input1_height, input2_height, scale);
CHECK_SYNC("hlCossimDerivate failed");
}
template <>
void CosSimBackward<DEVICE_TYPE_GPU>(const GpuMatrix& out_grad,
const GpuMatrix& out_val,
const GpuMatrix& in1_val,
const GpuMatrix& in2_val,
GpuMatrix& in1_grad,
GpuMatrix& in2_grad,
real scale) {
CHECK(out_grad.getData() && out_val.getData() && in1_val.getData() &&
in2_val.getData() && in1_grad.getData() && in2_grad.getData());
CHECK(out_grad.useGpu_ && out_val.useGpu_ && in1_val.useGpu_
&& in2_val.useGpu_ && in1_grad.useGpu_ && in2_grad.useGpu_)
<< "Matrix types are not equally GPU";
size_t dim = in1_val.getWidth();
const real* grad = out_grad.getData();
const real* out = out_val.getData();
const real* prev_out_x = in1_val.getData();
const real* prev_out_y = in2_val.getData();
real* prev_grad_x = in1_grad.getData();
real* prev_grad_y = in2_grad.getData();
hlCossimDerivative(grad,
out,
prev_out_x,
prev_out_y,
prev_grad_x,
prev_grad_y,
dim,
in1_val.getHeight(),
in2_val.getHeight(),
scale);
}
} // namespace paddle
|
003de3ca150f92cd27f1c3509310ff31ccb1d4a5.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "CosSimOp.h"
namespace paddle {
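// KeCosSim: one thread block per output row. Threads accumulate partial sums of
// x.x, y.y and x.y for the row of input1 (and the matching row of input2, or its
// single row when input2_height == 1) into shared memory, tree-reduce them, and
// thread 0 writes scale * x.y / (sqrt(x.x) * sqrt(y.y)), the scaled cosine similarity.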
template<int block_size>
__global__ void KeCosSim(real* output,
const real* input1,
const real* input2,
int width,
int input1_height,
int input2_height,
real scale) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ real xx[block_size];
__shared__ real yy[block_size];
__shared__ real xy[block_size];
xx[tid] = 0.0;
yy[tid] = 0.0;
xy[tid] = 0.0;
__syncthreads();
input1 += ty * width;
if (input2_height > 1) {
input2 += ty * width;
}
for (int index = tid; index < width; index += block_size) {
real x = input1[index];
real y = input2[index];
xx[tid] += x * x;
yy[tid] += y * y;
xy[tid] += x * y;
}
__syncthreads();
for (int s = block_size / 2; s > 0; s >>= 1) {
if (tid < s) {
xx[tid] += xx[tid + s];
yy[tid] += yy[tid + s];
xy[tid] += xy[tid + s];
}
__syncthreads();
}
if (tid == 0) {
output[ty] = scale * xy[0] / (sqrt(xx[0]) * sqrt(yy[0]));
}
}
void hlCossim(real* output,
const real* input1,
const real* input2,
size_t width,
size_t input1_height,
size_t input2_height,
real scale) {
CHECK_NOTNULL(output);
CHECK_NOTNULL(input1);
CHECK_NOTNULL(input2);
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, input1_height);
KeCosSim<block_size><<<grid, threads, 0, STREAM_DEFAULT>>>
(output, input1, input2, width, input1_height, input2_height, scale);
CHECK_SYNC("hlCossim failed");
}
template <>
void CosSimForward<DEVICE_TYPE_GPU>(GpuMatrix& out_mat,
const GpuMatrix& in1_mat,
const GpuMatrix& in2_mat,
real scale) {
CHECK(out_mat.getData() && in1_mat.getData() && in2_mat.getData());
CHECK(in1_mat.useGpu_ == true && in2_mat.useGpu_ == true)
<< "Matrix type are not GPU";
size_t dim = in1_mat.getWidth();
real* out = out_mat.getData();
const real* x = in1_mat.getData();
const real* y = in2_mat.getData();
hlCossim(out, x, y, dim, in1_mat.getHeight(), in2_mat.getHeight(), scale);
}
template<int block_size>
__global__ void KeCosSimDerivative(const real* grad,
const real* output,
const real* prev_out_x,
const real* prev_out_y,
real* prev_grad_x,
real* prev_grad_y,
size_t width,
size_t input1_height,
size_t input2_height,
real scale) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ real xx[block_size];
__shared__ real yy[block_size];
__shared__ real xy[block_size];
xx[tid] = 0.0;
yy[tid] = 0.0;
xy[tid] = 0.0;
__syncthreads();
prev_out_x += ty * width;
prev_grad_x += ty * width;
if (input2_height > 1) {
prev_out_y += ty * width;
prev_grad_y += ty * width;
}
for (int index = tid; index < width; index += block_size) {
real x = prev_out_x[index];
real y = prev_out_y[index];
xx[tid] += x * x;
yy[tid] += y * y;
xy[tid] += x * y;
}
__syncthreads();
for (int s = block_size / 2; s > 0; s >>= 1) {
if (tid < s) {
xx[tid] += xx[tid + s];
yy[tid] += yy[tid + s];
xy[tid] += xy[tid + s];
}
__syncthreads();
}
if (xy[0] == 0) {
real reciprocal = 1.0 / (sqrt(xx[0]) * sqrt(yy[0]));
for (int index = tid; index < width; index += block_size) {
prev_grad_x[index] +=
scale * grad[ty] * prev_out_y[index] * reciprocal;
if (input2_height > 1) {
prev_grad_y[index] +=
scale * grad[ty] * prev_out_x[index] * reciprocal;
} else {
paddle::paddleAtomicAdd(prev_grad_y + index,
scale * grad[ty] * prev_out_x[index] * reciprocal);
}
}
} else {
real reciprocalXY = 1.0 / xy[0];
real reciprocalSquareSumX = 1.0 / xx[0];
real reciprocalSquareSumY = 1.0 / yy[0];
for (int index = tid; index < width; index += block_size) {
prev_grad_x[index] += output[ty] * grad[ty] *
(prev_out_y[index] * reciprocalXY -
prev_out_x[index] * reciprocalSquareSumX);
if (input2_height > 1) {
prev_grad_y[index] += output[ty] * grad[ty] *
(prev_out_x[index] * reciprocalXY -
prev_out_y[index] * reciprocalSquareSumY);
} else {
paddle::paddleAtomicAdd(prev_grad_y + index, output[ty] * grad[ty] *
(prev_out_x[index] * reciprocalXY -
prev_out_y[index] * reciprocalSquareSumY));
}
}
}
}
void hlCossimDerivative(const real* grad,
const real* output,
const real* prev_out_x,
const real* prev_out_y,
real* prev_grad_x,
real* prev_grad_y,
size_t width,
size_t input1_height,
size_t input2_height,
real scale) {
CHECK_NOTNULL(grad);
CHECK_NOTNULL(output);
CHECK_NOTNULL(prev_out_x);
CHECK_NOTNULL(prev_out_y);
CHECK_NOTNULL(prev_grad_x);
CHECK_NOTNULL(prev_grad_y);
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, input1_height);
KeCosSimDerivative<block_size><<<grid, threads, 0, STREAM_DEFAULT>>>
(grad, output, prev_out_x, prev_out_y, prev_grad_x, prev_grad_y, width,
input1_height, input2_height, scale);
CHECK_SYNC("hlCossimDerivate failed");
}
template <>
void CosSimBackward<DEVICE_TYPE_GPU>(const GpuMatrix& out_grad,
const GpuMatrix& out_val,
const GpuMatrix& in1_val,
const GpuMatrix& in2_val,
GpuMatrix& in1_grad,
GpuMatrix& in2_grad,
real scale) {
CHECK(out_grad.getData() && out_val.getData() && in1_val.getData() &&
in2_val.getData() && in1_grad.getData() && in2_grad.getData());
CHECK(out_grad.useGpu_ && out_val.useGpu_ && in1_val.useGpu_
&& in2_val.useGpu_ && in1_grad.useGpu_ && in2_grad.useGpu_)
<< "Matrix types are not equally GPU";
size_t dim = in1_val.getWidth();
const real* grad = out_grad.getData();
const real* out = out_val.getData();
const real* prev_out_x = in1_val.getData();
const real* prev_out_y = in2_val.getData();
real* prev_grad_x = in1_grad.getData();
real* prev_grad_y = in2_grad.getData();
hlCossimDerivative(grad,
out,
prev_out_x,
prev_out_y,
prev_grad_x,
prev_grad_y,
dim,
in1_val.getHeight(),
in2_val.getHeight(),
scale);
}
} // namespace paddle
|
ea90966318e5f747a4deecdd58c70cb314978cde.hip
|
// !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
#include "../NeuralNetwork/Activation/ActivationFunction.cu"
extern "C"
{
__constant__ int D_INPUT_UNITS;
__constant__ int D_HIDDEN_UNITS;
__constant__ int D_OUTPUT_UNITS;
__constant__ ActivationFunctionEnum D_ACTIVATION_FUNCTION;
__global__ void FeedforwardHiddenKernel(float *input, float *hiddenActivations, float *previousHiddenActivations, float *hiddenActivationDerivatives, float *inputWeights, float *recurrentWeights)
{
int unitId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (unitId < D_HIDDEN_UNITS)
{
float weightedSum = 0;
int weightId = unitId * D_INPUT_UNITS;
for (int i = 0; i < D_INPUT_UNITS; i++)
{
weightedSum += inputWeights[weightId] * input[i];
weightId++;
}
weightId = unitId * D_HIDDEN_UNITS;
for (int i = 0; i < D_HIDDEN_UNITS; i++)
{
weightedSum += recurrentWeights[weightId] * previousHiddenActivations[i];
weightId++;
}
hiddenActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
hiddenActivationDerivatives[unitId] = EvaluateDerivative(D_ACTIVATION_FUNCTION, weightedSum);
}
}
__global__ void FeedforwardOutputKernel(float *hiddenActivations, float *outputActivations, float *outputActivationDerivatives, float *outputWeights)
{
int unitId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (unitId < D_OUTPUT_UNITS)
{
float weightedSum = 0;
int weightId = unitId * D_HIDDEN_UNITS;
for (int i = 0; i < D_HIDDEN_UNITS; i++)
{
weightedSum += outputWeights[weightId] * hiddenActivations[i];
weightId++;
}
outputActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
outputActivationDerivatives[unitId] = EvaluateDerivative(D_ACTIVATION_FUNCTION, weightedSum);
}
}
}
|
ea90966318e5f747a4deecdd58c70cb314978cde.cu
|
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
#include "../NeuralNetwork/Activation/ActivationFunction.cu"
extern "C"
{
__constant__ int D_INPUT_UNITS;
__constant__ int D_HIDDEN_UNITS;
__constant__ int D_OUTPUT_UNITS;
__constant__ ActivationFunctionEnum D_ACTIVATION_FUNCTION;
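// FeedforwardHiddenKernel: one thread per hidden unit. Each thread forms the
// weighted sum W_in[unit,:] . input + W_rec[unit,:] . previousHiddenActivations,
// then stores the activation f(sum) and the derivative f'(sum) of the configured
// activation function (the derivative is presumably consumed by a backward pass).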
__global__ void FeedforwardHiddenKernel(float *input, float *hiddenActivations, float *previousHiddenActivations, float *hiddenActivationDerivatives, float *inputWeights, float *recurrentWeights)
{
int unitId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (unitId < D_HIDDEN_UNITS)
{
float weightedSum = 0;
int weightId = unitId * D_INPUT_UNITS;
for (int i = 0; i < D_INPUT_UNITS; i++)
{
weightedSum += inputWeights[weightId] * input[i];
weightId++;
}
weightId = unitId * D_HIDDEN_UNITS;
for (int i = 0; i < D_HIDDEN_UNITS; i++)
{
weightedSum += recurrentWeights[weightId] * previousHiddenActivations[i];
weightId++;
}
hiddenActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
hiddenActivationDerivatives[unitId] = EvaluateDerivative(D_ACTIVATION_FUNCTION, weightedSum);
}
}
__global__ void FeedforwardOutputKernel(float *hiddenActivations, float *outputActivations, float *outputActivationDerivatives, float *outputWeights)
{
int unitId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (unitId < D_OUTPUT_UNITS)
{
float weightedSum = 0;
int weightId = unitId * D_HIDDEN_UNITS;
for (int i = 0; i < D_HIDDEN_UNITS; i++)
{
weightedSum += outputWeights[weightId] * hiddenActivations[i];
weightId++;
}
outputActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
outputActivationDerivatives[unitId] = EvaluateDerivative(D_ACTIVATION_FUNCTION, weightedSum);
}
}
}
|
b1da20fb639ae8d182b16410778851787b712d25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab1.h"
#include <math.h>
#define S 1000
#define fps 24
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
__device__ const unsigned w = 640;
__device__ const unsigned h = 480;
int *tempx;
int *tempy;
struct Lab1VideoGenerator::Impl {
int t = 0;
};
Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
}
Lab1VideoGenerator::~Lab1VideoGenerator() {}
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
__device__ void draw(int x1, int y1, uint8_t *yuv) {
if(x1>=0 && x1<w && y1>=0 && y1<h)
yuv[x1+y1*w] = 255;
}
__global__ void printStripe(int *x, int *y, int time, uint8_t *yuv, int len) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= S) return;
int x1 = x[idx]*4;
int y1 = y[idx]*4;
int dx = x1 - w/2;
int dy = y1 - h/2;
if(abs(dx) > abs(dy)) {
if(x1 < w) {
for(int i=x1; i>(x1 -len); i--)
draw(i, y1+dy*(i-x1)/dx, yuv);
} else {
for(int i=x1; i<(x1 +len); i++)
draw(i, y1+dy*(i-x1)/dx, yuv);
}
} else {
if(y1 < h) {
for(int i=y1; i>(y1 -len); i--)
draw(x1+dx*(i-y1)/dy, i, yuv);
} else {
for(int i=y1; i<(y1 +len); i++)
draw(x1+dx*(i-y1)/dy, i, yuv);
}
}
}
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
int *x;
int *y;
hipMalloc(&x, S*sizeof(int));
hipMemcpy(x, tempx, S*sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&y, S*sizeof(int));
hipMemcpy(y, tempy, S*sizeof(int), hipMemcpyHostToDevice);
hipMemset(yuv, 0, W*H);
hipLaunchKernelGGL(( printStripe), dim3(S/4+1), dim3(16), 0, 0, x, y, 0, yuv, impl->t);
hipMemset(yuv+W*H, 128, W*H/2);
hipDeviceSynchronize();
hipMemset(yuv+W*H, 128, W*H/2);
hipFree(x);
hipFree(y);
++(impl->t);
}
|
b1da20fb639ae8d182b16410778851787b712d25.cu
|
#include "lab1.h"
#include <math.h>
#define S 1000
#define fps 24
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
__device__ const unsigned w = 640;
__device__ const unsigned h = 480;
int *tempx;
int *tempy;
struct Lab1VideoGenerator::Impl {
int t = 0;
};
Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
}
Lab1VideoGenerator::~Lab1VideoGenerator() {}
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
__device__ void draw(int x1, int y1, uint8_t *yuv) {
if(x1>=0 && x1<w && y1>=0 && y1<h)
yuv[x1+y1*w] = 255;
}
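// printStripe: each thread takes one seed point (x[idx], y[idx]), scales it by 4,
// and draws a segment of length `len` through it with slope dy/dx taken from the
// point's offset to the frame centre, stepping along whichever axis dominates
// that offset and interpolating the other coordinate via draw().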
__global__ void printStripe(int *x, int *y, int time, uint8_t *yuv, int len) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= S) return;
int x1 = x[idx]*4;
int y1 = y[idx]*4;
int dx = x1 - w/2;
int dy = y1 - h/2;
if(abs(dx) > abs(dy)) {
if(x1 < w) {
for(int i=x1; i>(x1 -len); i--)
draw(i, y1+dy*(i-x1)/dx, yuv);
} else {
for(int i=x1; i<(x1 +len); i++)
draw(i, y1+dy*(i-x1)/dx, yuv);
}
} else {
if(y1 < h) {
for(int i=y1; i>(y1 -len); i--)
draw(x1+dx*(i-y1)/dy, i, yuv);
} else {
for(int i=y1; i<(y1 +len); i++)
draw(x1+dx*(i-y1)/dy, i, yuv);
}
}
}
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
int *x;
int *y;
cudaMalloc(&x, S*sizeof(int));
cudaMemcpy(x, tempx, S*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&y, S*sizeof(int));
cudaMemcpy(y, tempy, S*sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(yuv, 0, W*H);
printStripe<<<S/4+1, 16>>>(x, y, 0, yuv, impl->t);
cudaMemset(yuv+W*H, 128, W*H/2);
cudaDeviceSynchronize();
cudaMemset(yuv+W*H, 128, W*H/2);
cudaFree(x);
cudaFree(y);
++(impl->t);
}
|
aaaff13613e1243b89ebea415b6d277d71809d7b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// http://docs.nvidia.com/cuda/hiprand/device-api-overview.html#thrust-and-hiprand-example
// http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MTGP/mtgp3.pdf
#include <cassert>
#include <cstdlib>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <iomanip>
template <typename T>
struct curand_printf
{
T _seed ;
T _offset ;
T _seq0 ;
T _seq1 ;
T _zero ;
bool _logf ;
curand_printf( T seed , T offset, T seq0, T seq1, bool logf )
:
_seed(seed),
_offset(offset),
_seq0(seq0),
_seq1(seq1),
_logf(logf),
_zero(0)
{
}
__device__
void operator()(unsigned id)
{
unsigned thread_offset = 0 ;
hiprandState_t s;
hiprand_init(_seed, id + thread_offset, _offset, &s);
printf(" id:%4u thread_offset:%u seq0:%llu seq1:%llu \n", id, thread_offset, _seq0, _seq1 );
for(T i = _zero ; i < _seq1 ; ++i)
{
float f = hiprand_uniform(&s);
if( i < _seq0 ) continue ;
printf(" %lf ", f );
if(_logf)
{
float lf = -logf(f)*1e7f ;
printf(" %lf ", lf );
//double d(f) ;
//double ld = -log(d)*1e7 ;
//double ld = -log(double(f))*1e7 ;
float ld = -log(double(f))*1e7 ;
printf(" %15.10g ", ld );
}
if( i % 4 == 3 ) printf("\n") ;
}
}
};
/*
__device__ void
hiprand_init (
unsigned long long seed,
unsigned long long sequence,
unsigned long long offset,
hiprandState_t *state)
The hiprand_init() function sets up an initial state allocated by the caller
using the given seed, sequence number, and offset within the sequence.
Different seeds are guaranteed to produce different starting states and
different sequences. The same seed always produces the same state and the same
sequence. The state set up will be the state after 2^67 sequence + offset calls
to hiprand() from the seed state.
*/
int main(int argc, char** argv)
{
int i0 = argc > 1 ? atoi(argv[1]) : 0 ;
int i1 = argc > 2 ? atoi(argv[2]) : i0+1 ;
int q0 = argc > 3 ? atoi(argv[3]) : 0 ;
int q1 = argc > 4 ? atoi(argv[4]) : 16 ;
char* LOGF = getenv("LOGF") ;
bool logf = LOGF != NULL ;
std::cout
<< argv[0]
<< std::endl
<< " i0 " << i0
<< " i1 " << i1
<< " q0 " << q0
<< " q1 " << q1
<< " logf " << ( logf ? "Y" : "N" )
<< std::endl
;
assert( i0 >= 0 && i1 >= 0 );
assert( q0 >= 0 && q1 >= 0 );
assert( i0 < i1 );
assert( q0 < q1 );
thrust::for_each(
thrust::counting_iterator<int>(i0),
thrust::counting_iterator<int>(i1),
curand_printf<unsigned long long>(0,0,q0,q1,logf));
hipDeviceSynchronize();
return 0;
}
|
aaaff13613e1243b89ebea415b6d277d71809d7b.cu
|
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// http://docs.nvidia.com/cuda/curand/device-api-overview.html#thrust-and-curand-example
// http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MTGP/mtgp3.pdf
#include <cassert>
#include <cstdlib>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <curand_kernel.h>
#include <iostream>
#include <iomanip>
template <typename T>
struct curand_printf
{
T _seed ;
T _offset ;
T _seq0 ;
T _seq1 ;
T _zero ;
bool _logf ;
curand_printf( T seed , T offset, T seq0, T seq1, bool logf )
:
_seed(seed),
_offset(offset),
_seq0(seq0),
_seq1(seq1),
_logf(logf),
_zero(0)
{
}
__device__
void operator()(unsigned id)
{
unsigned thread_offset = 0 ;
curandState s;
curand_init(_seed, id + thread_offset, _offset, &s);
printf(" id:%4u thread_offset:%u seq0:%llu seq1:%llu \n", id, thread_offset, _seq0, _seq1 );
for(T i = _zero ; i < _seq1 ; ++i)
{
float f = curand_uniform(&s);
if( i < _seq0 ) continue ;
printf(" %lf ", f );
if(_logf)
{
float lf = -logf(f)*1e7f ;
printf(" %lf ", lf );
//double d(f) ;
//double ld = -log(d)*1e7 ;
//double ld = -log(double(f))*1e7 ;
float ld = -log(double(f))*1e7 ;
printf(" %15.10g ", ld );
}
if( i % 4 == 3 ) printf("\n") ;
}
}
};
/*
__device__ void
curand_init (
unsigned long long seed,
unsigned long long sequence,
unsigned long long offset,
curandState_t *state)
The curand_init() function sets up an initial state allocated by the caller
using the given seed, sequence number, and offset within the sequence.
Different seeds are guaranteed to produce different starting states and
different sequences. The same seed always produces the same state and the same
sequence. The state set up will be the state after 2^67 sequence + offset calls
to curand() from the seed state.
*/
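// Usage sketch (the binary name below is just a placeholder; the arguments map to
// this program's own argv slots i0 i1 q0 q1): "./curand_printf_test 0 2 0 8" prints,
// for thread ids 0 and 1, the curand_uniform draws with sequence index in [0, 8).
// Set q0 > 0 to skip printing the first q0 draws, and define LOGF in the environment
// to also print -logf(u)*1e7 for each draw.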
int main(int argc, char** argv)
{
int i0 = argc > 1 ? atoi(argv[1]) : 0 ;
int i1 = argc > 2 ? atoi(argv[2]) : i0+1 ;
int q0 = argc > 3 ? atoi(argv[3]) : 0 ;
int q1 = argc > 4 ? atoi(argv[4]) : 16 ;
char* LOGF = getenv("LOGF") ;
bool logf = LOGF != NULL ;
std::cout
<< argv[0]
<< std::endl
<< " i0 " << i0
<< " i1 " << i1
<< " q0 " << q0
<< " q1 " << q1
<< " logf " << ( logf ? "Y" : "N" )
<< std::endl
;
assert( i0 >= 0 && i1 >= 0 );
assert( q0 >= 0 && q1 >= 0 );
assert( i0 < i1 );
assert( q0 < q1 );
thrust::for_each(
thrust::counting_iterator<int>(i0),
thrust::counting_iterator<int>(i1),
curand_printf<unsigned long long>(0,0,q0,q1,logf));
cudaDeviceSynchronize();
return 0;
}
|
cb0a086b041d35881f32b3e4eb02809026f50621.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <unordered_set>
#include <set>
#include <algorithm>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include "Utils.hpp"
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
// Maybe there is a simple 64-bit solution out there?
__host__ __device__ inline int hammingWeight(uint32_t v)
{
v = v - ((v>>1) & 0x55555555);
v = (v & 0x33333333) + ((v>>2) & 0x33333333);
return ((v + (v>>4) & 0xF0F0F0F) * 0x1010101) >> 24;
}
__host__ __device__ inline int hammingDistance(const uint64_t a, const uint64_t b)
{
const uint64_t delta = a ^ b;
return hammingWeight(delta & 0xffffffffULL) + hammingWeight(delta >> 32);
}
struct HammingDistanceFilter
{
const uint64_t _target, _maxDistance;
HammingDistanceFilter(const uint64_t target, const uint64_t maxDistance) :
_target(target), _maxDistance(maxDistance) {
}
__host__ __device__ bool operator()(const uint64_t hash) {
return hammingDistance(_target, hash) <= _maxDistance;
}
};
void findHashes(
const thrust::host_vector<uint64_t> hashesCpu,
const std::unordered_set<uint64_t> hashesToSearch,
const int maxDistance
) {
/*std::cout << hashesToSearch.size() << " hashes to find from " <<
hashesCpu.size() << ", " << maxDistance << " max distance" << std::endl;*/
thrust::device_vector<uint64_t> hashesGpu = hashesCpu;
thrust::device_vector<uint64_t> matchesGpu(hashesCpu.size());
std::vector<double> durations;
std::vector<size_t> nMatches;
for (auto it = hashesToSearch.begin(); it != hashesToSearch.end(); ++it) {
CHRTimer timer;
timer.Start();
matchesGpu.clear();
const auto matchesGpuEnd = thrust::copy_if(
hashesGpu.cbegin(), hashesGpu.cend(), matchesGpu.begin(), HammingDistanceFilter(*it, maxDistance)
);
thrust::system::cuda::detail::synchronize();
nMatches.push_back(matchesGpuEnd - matchesGpu.begin());
thrust::host_vector<uint64_t> matchesCpu(nMatches.back());
thrust::copy(matchesGpu.begin(), matchesGpuEnd, matchesCpu.begin());
thrust::system::cuda::detail::synchronize();
timer.Stop();
durations.push_back(timer.GetElapsedAsSeconds() * 1000.0);
#if 0
std::set<uint64_t> uniqueMatches(matchesCpu.begin(), matchesCpu.end());
std::cout << nMatches.back() << " matches (" << uniqueMatches.size() << " unique) for\n " << Utils::toBinary(*it) << "\n";
for (auto it2 = uniqueMatches.begin(); it2 != uniqueMatches.end(); ++it2) {
std::cout << " - " << Utils::toBinary(*it2) << "\n";
}
std::cout << std::endl;
#endif
}
std::sort(durations.begin(), durations.end());
std::sort(nMatches.begin(), nMatches.end());
const int percentiles[] = {5, 25, 50, 75, 95, 99, -1};
char buffer[2048];
#if 0
for (int i = 0; percentiles[i] >= 0; ++i) {
const int j = (int)(0.5 + percentiles[i] * 0.01 * (hashesToSearch.size() - 1));
sprintf_s(buffer, 2048, "%3dth: %9.6f sec, %8d matches\n", percentiles[i], durations[j], nMatches[j]);
std::cout << buffer;
}
#else
char *bufferPtr = buffer;
bufferPtr += sprintf_s(bufferPtr, 2048, "%5.2f", hashesCpu.size() * 0.000001);
for (int i = 0; percentiles[i] >= 0; ++i) {
const int j = (int)(0.5 + percentiles[i] * 0.01 * (hashesToSearch.size() - 1));
bufferPtr += sprintf_s(bufferPtr, 2048, " %9.6f", durations[j]);
}
for (int i = 0; percentiles[i] >= 0; ++i) {
const int j = (int)(0.5 + percentiles[i] * 0.01 * (hashesToSearch.size() - 1));
bufferPtr += sprintf_s(bufferPtr, 2048, " %7d", nMatches[j]);
}
std::cout << buffer;
#endif
std::cout << std::endl;
}
/*
Example parameters: 500 1 2 3 4 5 to run 500 searches over 1, 2, 3, 4 and 5 million 64-bit hashes
*/
int main(int argc, char **argv)
{
const int nSearches = argc >= 2 ? atoi(argv[1]) : 50;
std::vector<int> nHashes;
if (argc >= 3) {
for (int i = 2; i < argc; ++i) {
nHashes.push_back(atoi(argv[i]) * 1000000);
}
}
else {
nHashes.push_back(100000);
}
thrust::host_vector<uint64_t> hashesCpu(nHashes.back());
std::unordered_set<uint64_t> hashesToSearch(nSearches);
{
// Use py/generate_to_file.py to generate this file
std::ifstream numbers("numbers.txt");
for (int i = 0; i < nHashes.back(); ++i) {
numbers >> hashesCpu[i];
}
}
for (int i = 0; hashesToSearch.size() < nSearches && i < hashesCpu.size(); ++i) {
hashesToSearch.insert(hashesCpu[i]);
}
for (auto it = nHashes.begin(); it != nHashes.end(); ++it) {
findHashes(thrust::host_vector<uint64_t>(hashesCpu.begin(), hashesCpu.begin() + *it), hashesToSearch, 8);
}
return 0;
}
|
cb0a086b041d35881f32b3e4eb02809026f50621.cu
|
#include <unordered_set>
#include <set>
#include <algorithm>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include "cuda_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include "Utils.hpp"
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
// Maybe there is a simple 64-bit solution out there?
__host__ __device__ inline int hammingWeight(uint32_t v)
{
v = v - ((v>>1) & 0x55555555);
v = (v & 0x33333333) + ((v>>2) & 0x33333333);
return ((v + (v>>4) & 0xF0F0F0F) * 0x1010101) >> 24;
}
__host__ __device__ inline int hammingDistance(const uint64_t a, const uint64_t b)
{
const uint64_t delta = a ^ b;
return hammingWeight(delta & 0xffffffffULL) + hammingWeight(delta >> 32);
}
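// A 64-bit variant is sketched below (untested here, and assuming the usual
// CUDA/GCC popcount intrinsics; on MSVC the host path falls back to the 32-bit
// split). It answers the question above by counting the bits of the full 64-bit
// delta in a single call.
__host__ __device__ inline int hammingDistance64(const uint64_t a, const uint64_t b)
{
const uint64_t delta = a ^ b;
#if defined(__CUDA_ARCH__)
return __popcll(delta); // device-side 64-bit population count
#elif defined(__GNUC__) || defined(__clang__)
return __builtin_popcountll(delta); // host-side 64-bit population count
#else
return hammingDistance(a, b); // fall back to the 32-bit split above
#endif
}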
struct HammingDistanceFilter
{
const uint64_t _target, _maxDistance;
HammingDistanceFilter(const uint64_t target, const uint64_t maxDistance) :
_target(target), _maxDistance(maxDistance) {
}
__host__ __device__ bool operator()(const uint64_t hash) {
return hammingDistance(_target, hash) <= _maxDistance;
}
};
void findHashes(
const thrust::host_vector<uint64_t> hashesCpu,
const std::unordered_set<uint64_t> hashesToSearch,
const int maxDistance
) {
/*std::cout << hashesToSearch.size() << " hashes to find from " <<
hashesCpu.size() << ", " << maxDistance << " max distance" << std::endl;*/
thrust::device_vector<uint64_t> hashesGpu = hashesCpu;
thrust::device_vector<uint64_t> matchesGpu(hashesCpu.size());
std::vector<double> durations;
std::vector<size_t> nMatches;
for (auto it = hashesToSearch.begin(); it != hashesToSearch.end(); ++it) {
CHRTimer timer;
timer.Start();
matchesGpu.clear();
const auto matchesGpuEnd = thrust::copy_if(
hashesGpu.cbegin(), hashesGpu.cend(), matchesGpu.begin(), HammingDistanceFilter(*it, maxDistance)
);
thrust::system::cuda::detail::synchronize();
nMatches.push_back(matchesGpuEnd - matchesGpu.begin());
thrust::host_vector<uint64_t> matchesCpu(nMatches.back());
thrust::copy(matchesGpu.begin(), matchesGpuEnd, matchesCpu.begin());
thrust::system::cuda::detail::synchronize();
timer.Stop();
durations.push_back(timer.GetElapsedAsSeconds() * 1000.0);
#if 0
std::set<uint64_t> uniqueMatches(matchesCpu.begin(), matchesCpu.end());
std::cout << nMatches.back() << " matches (" << uniqueMatches.size() << " unique) for\n " << Utils::toBinary(*it) << "\n";
for (auto it2 = uniqueMatches.begin(); it2 != uniqueMatches.end(); ++it2) {
std::cout << " - " << Utils::toBinary(*it2) << "\n";
}
std::cout << std::endl;
#endif
}
std::sort(durations.begin(), durations.end());
std::sort(nMatches.begin(), nMatches.end());
const int percentiles[] = {5, 25, 50, 75, 95, 99, -1};
char buffer[2048];
#if 0
for (int i = 0; percentiles[i] >= 0; ++i) {
const int j = (int)(0.5 + percentiles[i] * 0.01 * (hashesToSearch.size() - 1));
sprintf_s(buffer, 2048, "%3dth: %9.6f sec, %8d matches\n", percentiles[i], durations[j], nMatches[j]);
std::cout << buffer;
}
#else
char *bufferPtr = buffer;
bufferPtr += sprintf_s(bufferPtr, 2048, "%5.2f", hashesCpu.size() * 0.000001);
for (int i = 0; percentiles[i] >= 0; ++i) {
const int j = (int)(0.5 + percentiles[i] * 0.01 * (hashesToSearch.size() - 1));
bufferPtr += sprintf_s(bufferPtr, 2048, " %9.6f", durations[j]);
}
for (int i = 0; percentiles[i] >= 0; ++i) {
const int j = (int)(0.5 + percentiles[i] * 0.01 * (hashesToSearch.size() - 1));
bufferPtr += sprintf_s(bufferPtr, 2048, " %7d", nMatches[j]);
}
std::cout << buffer;
#endif
std::cout << std::endl;
}
/*
Example parameters: 500 1 2 3 4 5 to run 500 searches over 1, 2, 3, 4 and 5 million 64-bit hashes
*/
int main(int argc, char **argv)
{
const int nSearches = argc >= 2 ? atoi(argv[1]) : 50;
std::vector<int> nHashes;
if (argc >= 3) {
for (int i = 2; i < argc; ++i) {
nHashes.push_back(atoi(argv[i]) * 1000000);
}
}
else {
nHashes.push_back(100000);
}
thrust::host_vector<uint64_t> hashesCpu(nHashes.back());
std::unordered_set<uint64_t> hashesToSearch(nSearches);
{
// Use py/generate_to_file.py to generate this file
std::ifstream numbers("numbers.txt");
for (int i = 0; i < nHashes.back(); ++i) {
numbers >> hashesCpu[i];
}
}
for (int i = 0; hashesToSearch.size() < nSearches && i < hashesCpu.size(); ++i) {
hashesToSearch.insert(hashesCpu[i]);
}
for (auto it = nHashes.begin(); it != nHashes.end(); ++it) {
findHashes(thrust::host_vector<uint64_t>(hashesCpu.begin(), hashesCpu.begin() + *it), hashesToSearch, 8);
}
return 0;
}
|
e90b5938a5abe456eaed5696d73579a89a74f95b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author: Henry Peng (20165483)
* University of Western Australia
* School of Computer Science and Software Engineering
* December 2010
*/
//#####################################
// Strategy for possible improvement:
//#####################################
// 1) Reduce Memcpy calls needed: Memcpy whole fr[iteration] to global variable ?
// 2) Reduce the number variables necessary for device (extra cudamallocing may reduce speed). e.g. remove temp variable
// 3) Parallel prefix sum, reduce number of threads needed? Can it increase speed? Currently: 256 threads
// 4) Reduce memory required for eliminated array and scan results array. Currently fixed at 512 points for each. plus Not scalable.
// 5) Reduce the size of memory allocated for each cuda variable.
// 6) Make into single kernel function with many device codes, single cudaThreadsynchronise (implement scan_best in device mode)
// 7) Look into using shared memory
//######################################
/////////////////////////////////////////////////////////
// Includes and Defines
/////////////////////////////////////////////////////////
//#define CUDPP_STATIC_LIB
#include "/usr/local/NVIDIA_GPU_Computing_SDK/C/common/inc/cudpp/cudpp.h"
#include "read.c"
#include "scan_best_kernel.cu"
#include <stdbool.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#include "cutil.h"
//#include "radixsort.cu"
//extern float ehv(int index, FRONT front);
//extern float hypervolume(FRONT front);
#define MIN(x, y) ((x < y) ? (x) : (y))
unsigned int frontSize;
unsigned int pointSize;
/////////////////////////////////////////////////////////
// Global Variables
/////////////////////////////////////////////////////////
//int maxDepth = -1; //the maximum depth you have reached
int n = 0; //the dimension of the current front we are working on
int iteration = 0; //depth of the recursion starting from 0
float hypervolume(FRONT);
FRONT *fr; //storage for storing array of sprimed/non-dominating fronts as we go deeper into the recursion
/* Device global variables */
float *d_temp;
float *d_front;
int *d_eliminated;
int *d_scanoutput;
float *d_temp2;
// unused anymore
FRONT *frontsArray;
float *ehvStack;
float *hvStack;
// cpu memory stacks
int *indexStack;
int *nPointsStack;
// cuda memory stacks
float *d_frontsArray;
float *d_hvStack;
float *d_ehvStack;
//Note: n is needed for slicing and sorting; iteration is needed for saving the array of fronts when going deeper into the recursion
/////////////////////////////////////////////////////////
// GPU Kernel Functions
/////////////////////////////////////////////////////////
/**
* Sprimes a front in parallel.
*/
__global__ void sprimeFront(float *frPoints_device, float *frontPoints_device, int index) {
frPoints_device[blockIdx.x*blockDim.x+threadIdx.x] = MIN(frontPoints_device[index*blockDim.x+threadIdx.x],
frontPoints_device[(blockIdx.x+1+index)*blockDim.x+threadIdx.x]);
}
/**
* Device Function: Determine domination status of point A and B.
* Similar to CPU implementation.
*
* returns 1 if point b dominates a
* zero if non-dominating
* returns -1 if point a dominates b
* returns 2 if point is equal
*/
__device__ int dominated(float *point_a, float *point_b, int nDim) {
int result = 2;
for (int i = 0; i < nDim; i++) {
if (point_a[i] > point_b[i]) {
if (result != 1) result = -1; else return 0;
}
if (point_b[i] > point_a[i]) {
if (result != -1) result = 1; else return 0;
}
}
return result;
}
/**
* Computes eliminated array in parallel.
* Flag = 0 indicates eliminated, flag = 1 is kept.
* e.g. result of a front with 5 points: [0, 1, 1, 0, 1].
* (known "trivial" bug: equal points will not be eliminated).
*/
__global__ void computeEliminatedArray(float *d_fr_iteration, int nDim, int *eliminated) {
__shared__ int flag;
flag = 1;
__syncthreads();
if (dominated(&d_fr_iteration[blockIdx.x*nDim] , &d_fr_iteration[threadIdx.x*nDim], nDim) == 1)
flag = 0;
__syncthreads();
//if (threadIdx.x==0) {
eliminated[blockIdx.x] = flag;
//}
}
/**
* Insert the results and reorder into temp array in parallel.
*/
__global__ void insertResults(float *d_fr_iteration, float *temp, int *eliminated, int *scanoutput) {
if (eliminated[blockIdx.x] == 1) {
//insert the non-dominated points
temp[(scanoutput[blockIdx.x]-1)*blockDim.x+threadIdx.x] = d_fr_iteration[blockIdx.x*blockDim.x+threadIdx.x];
} /*else {
//if eliminated insert at the end of the temp array.
temp[(gridDim.x-1-(blockIdx.x-scanoutput[blockIdx.x]))*blockDim.x+threadIdx.x] = d_fr_iteration[blockIdx.x*blockDim.x+threadIdx.x];
}*/
}
/**
* Create an inclusive scan output from exclusive scan output.
* Shift array left, and insert the sum of last element of scan and
* last element of input array, at the end of the sum.
*/
__global__ void scan_inclusive(int *d_scanbest, int *d_eliminated, int nPoints) {
if (threadIdx.x > 0)
d_scanbest[threadIdx.x-1] = d_scanbest[threadIdx.x];
//__syncthreads();
//if (threadIdx.x == nPoints-1) {
if (nPoints == 1) {
d_scanbest[nPoints-1] = d_eliminated[nPoints-1];
} else {
d_scanbest[nPoints-1] = d_scanbest[nPoints-2] + d_eliminated[nPoints-1];
}
//}
}
/////////////////////////////////////////////////////////
// Helper methods
/////////////////////////////////////////////////////////
/**
* Prefix-sum sequential on CPU. (Deprecated)
*/
int *sequentialScan(int *eliminated, int n)
{
int *output = (int *) malloc(sizeof(int) *n);
output[0] = eliminated[0];
for (int i = 1; i < n; i++) {
output[i] = output[i-1] + eliminated[i];
}
return output;
}
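/*
 * Alternative sketch (assumption: a Thrust/rocThrust installation is available
 * to this build, which is not verified here): the scan_best + scan_inclusive
 * pair used in limitset() could be replaced by a single library call that
 * produces the inclusive prefix sum directly on the device buffers.
 *
 *   #include <thrust/scan.h>
 *   #include <thrust/device_ptr.h>
 *
 *   void inclusiveScanOnDevice(int *d_in, int *d_out, int nPoints)
 *   {
 *       thrust::device_ptr<int> in(d_in), out(d_out);
 *       thrust::inclusive_scan(in, in + nPoints, out);  // d_out[i] = d_in[0] + ... + d_in[i]
 *   }
 */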
/**
* Prints a front.
*/
void printfront(FRONT front) {
for (int j = 0; j < front.nPoints; j++)
{
printf("\t");
for (int k = 0; k < n; k++)
{
printf("%f ",front.points[j].objectives[k]);
}
printf("\n");
}
}
/////////////////////////////////////////////////////////
// CPU Functions
/////////////////////////////////////////////////////////
/**
* Determine domination status of point A and B.
*
* returns 1 if point b dominates a
* zero if non-dominating
* returns -1 if point a dominates b
* returns 2 if point is equal
*/
int dominated(POINT a, POINT b) {
int result = 2;
for (int i = 0; i < n; i++) {
if (a.objectives[i] > b.objectives[i]) {
if (result != 1) result = -1; else return 0;
}
if (b.objectives[i] > a.objectives[i]) {
if (result != -1) result = 1; else return 0;
}
}
return result;
}
/**
* compare function for qsort sorting front in the last objective, i.e. increasing from top to bottom
* and we process hypervolumes from the bottom
*/
int compare (const void *a, const void *b)
{
//n == maxDimensions-iteration
for (int i = n - 1; i >= 0; i--) {
if (((*(POINT *)a).objectives[i] > (*(POINT *)b).objectives[i])) return 1;
if (((*(POINT *)a).objectives[i] < (*(POINT *)b).objectives[i])) return -1;
}
return 0;
}
/**
* Returns a sprimed & non-dominating front relative to point p at index.
*/
void limitset(int index, FRONT front) {
int z = front.nPoints-1-index;
/* <1> copy front to device memory */
float h_front[front.nPoints*n];
for (int i = 0; i < front.nPoints; i++) {
for (int j = 0; j < n; j++) {
h_front[i*n+j] = front.points[i].objectives[j];
}
}
hipMemcpy(d_front, h_front, front.nPoints*n*sizeof(float), hipMemcpyHostToDevice);
/* <2> Sprimes the front */
hipLaunchKernelGGL(( sprimeFront), dim3(z), dim3(n) , 0, 0, d_temp, d_front, index);
hipDeviceSynchronize(); // block until the device has completed
/* <3> Compute eliminated array */
hipLaunchKernelGGL(( computeEliminatedArray), dim3(z), dim3(z) , 0, 0, d_temp, n, d_eliminated);
hipDeviceSynchronize();
int N = z;
int blockSize = 512;
int nBlocks = N/blockSize + (N%blockSize == 0 ? 0:1);
/* <4> Compute prefix-sum in parallel */
hipLaunchKernelGGL(( scan_best), dim3(nBlocks), dim3(blockSize/2), sizeof(int)*(blockSize) , 0, d_scanoutput, d_eliminated, blockSize);
hipDeviceSynchronize();
hipLaunchKernelGGL(( scan_inclusive), dim3(1), dim3(z) , 0, 0, d_scanoutput, d_eliminated, z); //make the result into an inclusive scan result.
hipDeviceSynchronize();
/* <5> Insert the results into temp buffer */
hipLaunchKernelGGL(( insertResults), dim3(z),dim3(n), 0, 0, d_temp, d_temp2, d_eliminated, d_scanoutput);
hipDeviceSynchronize();
/* <6> Copy final results from device buffer back to host */
hipMemcpy(h_front, d_temp2, z*n*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < z; i++) {
for (int j = 0; j < n; j++) {
fr[iteration].points[i].objectives[j] = h_front[i*n+j];
}
}
/* <7> Update number of points */
hipMemcpy(&fr[iteration].nPoints, &d_scanoutput[z-1], sizeof(int), hipMemcpyDeviceToHost); //update number of points
}
/**
* Returns the size of exclusive hypervolume of point p at index relative to a front set.
*/
float ehv(int index, FRONT front) {
//hypervolume of a single point
float ehv = 1;
for (int i = 0; i < n; i++) {
ehv *= front.points[index].objectives[i];
}
//if not the last point, then go deeper into the recursion
if (index < front.nPoints-1) {
limitset(index, front); //limit the front relative to index.
iteration++; //slicing
ehv -= hypervolume(fr[iteration-1]); //subtract the hypervolume of the limit set from ehv.
iteration--;
}
return ehv;
}
/**
* Returns the size of hypervolume of a front.
*/
float hypervolume(FRONT front) {
//sort the front with qsort
qsort(front.points, front.nPoints, sizeof (POINT), compare);
//calculate for base case = 2D
if (n==2) {
float vol2d = (front.points[0].objectives[0] * front.points[0].objectives[1]);
for (int i = 1; i < front.nPoints; i++) {
vol2d += (front.points[i].objectives[0]) *
(front.points[i].objectives[1] - front.points[i - 1].objectives[1]);
}
return vol2d;
}
float sumhv = 0;
n--;
//sum all the segments
for (int i = front.nPoints - 1; i >= 0; i--)
//for (int i = 0; i < front.nPoints; i++) //annoying bug that causes inaccurate results
sumhv += front.points[i].objectives[n] * ehv(i, front);
n++;
return sumhv;
}
// creates the front frontsArray[fr-1].points[indexStack[fr-1]+1 ..] in frontsArray[fr],
// with each point bounded by frontsArray[fr-1].points[indexStack[fr-1]]
// and with dominated points removed
void makeDominatedBit()
{
int z = frontsArray[iteration-1].nPoints - 1 - indexStack[iteration-1];
for (int i = 0; i < z; i++) {
for (int j = 0; j < n; j++) {
frontsArray[iteration].points[i].objectives[j] = MIN(frontsArray[iteration-1].points[indexStack[iteration-1]].objectives[j],
frontsArray[iteration-1].points[indexStack[iteration-1] + 1 + i].objectives[j]);
}
}
POINT t; // have to do proper swaps because of the reuse of the memory hierarchy
frontsArray[iteration].nPoints = 1;
for (int i = 1; i < z; i++) {
int j = 0;
bool keep = true;
while (j < frontsArray[iteration].nPoints && keep) {
switch (dominated(frontsArray[iteration].points[i], frontsArray[iteration].points[j])) {
case -1:
t = frontsArray[iteration].points[j];
frontsArray[iteration].points[j] = frontsArray[iteration].points[frontsArray[iteration].nPoints - 1];
frontsArray[iteration].points[frontsArray[iteration].nPoints - 1] = t;
frontsArray[iteration].nPoints--;
break;
case 0:
j++;
break;
// case 2: printf("Identical points!\n");
default:
keep = false;
}
}
if (keep) {
t = frontsArray[iteration].points[frontsArray[iteration].nPoints];
frontsArray[iteration].points[frontsArray[iteration].nPoints] = frontsArray[iteration].points[i];
frontsArray[iteration].points[i] = t;
frontsArray[iteration].nPoints++;
}
}
}
void hvnew() {
// sets hvStack[0] to the hypervolume of frontsArray[0][0 ..]
qsort(frontsArray[0].points, frontsArray[0].nPoints, sizeof(POINT), compare);
indexStack[0] = frontsArray[0].nPoints - 1;
while (indexStack[0] >= 0) { // there are jobs remaining
if (indexStack[iteration] < 0) { // we've finished the jobs at this level: i.e. completed all ehv calculation (HV is complete for that level!)
iteration--;
// compute the single point ehv excluding the last objective
ehvStack[iteration] -= hvStack[iteration+1];
// add the ehv multiplied by the last objective left out due to n--, to the hv stack
hvStack[iteration] += (frontsArray[iteration].points[indexStack[iteration]].objectives[n]) * ehvStack[iteration];
// 1 job is finished for the previous iteration
indexStack[iteration]--;
// finished with next level ehv
n++;
} else if (n == 2) { // do this job using the linear algorithm
//TODO make this work
/*if (indexStack[0] == 0) { //or iteration== 0
hvStack[0] = frontsArray[0].points[0].objectives[0] * frontsArray[0].points[0].objectives[1];
for (int i = 1; i < frontsArray[0].nPoints; i++) {
hvStack[0] += (frontsArray[0].points[i].objectives[0]) *
(frontsArray[0].points[i].objectives[1] - frontsArray[0].points[i - 1].objectives[1]);
}
indexStack[0]--;
n++;
} else {*/
iteration--;
ehvStack[iteration] -= frontsArray[iteration+1].points[0].objectives[0] * frontsArray[iteration+1].points[0].objectives[1];
for (int i = 1; i < frontsArray[iteration+1].nPoints; i++) {
ehvStack[iteration] -= (frontsArray[iteration+1].points[i].objectives[0]) * (frontsArray[iteration+1].points[i].objectives[1] - frontsArray[iteration+1].points[i-1].objectives[1]);
}
hvStack[iteration] += frontsArray[iteration].points[indexStack[iteration]].objectives[n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
//}
} else { // we need to "recurse"
n--;
ehvStack[iteration] = 1;
for (int i = 0; i < n; i++) {
//compute the single point ehv excluding the last objective
ehvStack[iteration] *= frontsArray[iteration].points[indexStack[iteration]].objectives[i];
}
if (indexStack[iteration] == frontsArray[iteration].nPoints - 1) { // first job at this level: set will be empty = no need to recurse
// add the first ehv multiplied by the last objective left out due to n--, to the hv stack
hvStack[iteration] = frontsArray[iteration].points[indexStack[iteration]].objectives[n] * ehvStack[iteration];
indexStack[iteration]--;
// finished with first level ehv (index = nPoints-1), now need to calculate the levels until reach (index = 0)
n++;
} else { // set will be non-empty: create a new job
//go to next level recursion
iteration++;
makeDominatedBit();
qsort(frontsArray[iteration].points, frontsArray[iteration].nPoints, sizeof(POINT), compare);
//reset index stack to the number of points-1
indexStack[iteration] = frontsArray[iteration].nPoints - 1;
}
}
}
}
/**
* Timer Functions
*/
void run(int argc, char *argv[])
{
unsigned int timer = 0;
CUT_DEVICE_INIT(argc, argv);
/////////////////////////////////////////////////////////////////////
// Create and start a timer called "timer"
// Calls to create and start timers are enveloped in the CUT_SAFE_CALL
// This CUDA Utility Tool checks for errors upon return.
// If an error is found, it prints out an error message, file name,
// and line number in file where the error can be found
/////////////////////////////////////////////////////////////////////
timer = 0;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
// Stop the timer
CUT_SAFE_CALL(cutStopTimer(timer));
printf( "Processing time: %f (ms)\n", cutGetTimerValue(timer));
// Delete the timer
CUT_SAFE_CALL(cutDeleteTimer(timer));
}
/**
* Runs a parallel hypervolume
*/
__global__ void hvparallellol() {
// Should call many device functions
//sortParallel();
//d_indexStack[0] = d_frontsArray[0].nPoints - 1;
}
/**
* Runs a parallel hypervolume
*/
//void hvparallel() {
/*int blockSize = 100;
int nBlocks = N/blockSize + (N%blockSize == 0 ? 0:1);
// where N is the parallel threads required
global<<<nBlocks, blockSize>>> ( param , N );
hipDeviceSynchronize();
checkCUDAError("HV parallel failed!");*/
//}
////////////////////////////////////////////////////////////////
// Start of CUDA CODE
////////////////////////////////////////////////////////////////
/**
* Returns a sprimed & non-dominating front relative to point p at index.
*/
void limitset() {
// TODO make this a kernel which calls many device functions
// TODO kernels may need to be passed the number of points in the front (from nPointsStack)
// TODO &d_frontsArray[frontSize*iteration] may need to be changed to d_frontsArray+frontSize*iteration
// sets the number of points in sprimed front
int z = nPointsStack[iteration-1] - 1 - indexStack[iteration-1];
// sprimes the front and store it into temporary storage
hipLaunchKernelGGL(( sprimeFront), dim3(z), dim3(n) , 0, 0, d_temp, d_frontsArray, indexStack[iteration-1]);
hipDeviceSynchronize();
// compute eliminated array and store it in d_eliminated
hipLaunchKernelGGL(( computeEliminatedArray), dim3(z), dim3(z) , 0, 0, d_temp, n, d_eliminated);
hipDeviceSynchronize();
// compute parallel prefix sum and store the result in d_scanoutput
// TODO may need to make use of cudpp for this
hipLaunchKernelGGL(( scan_best), dim3(256), dim3(512/2), sizeof(int)*(512) , 0, d_scanoutput, d_eliminated, 512);
hipDeviceSynchronize();
hipLaunchKernelGGL(( scan_inclusive), dim3(1), dim3(z) , 0, 0, d_scanoutput, d_eliminated, z); //make the result into an inclusive scan result.
hipDeviceSynchronize();
// compute the results and store it in frontArray
hipLaunchKernelGGL(( insertResults), dim3(z),dim3(n), 0, 0, d_temp, &d_frontsArray[frontSize*iteration], d_eliminated, d_scanoutput);
hipDeviceSynchronize();
// update number of points to the host
hipMemcpy(&nPointsStack[iteration], &d_scanoutput[z-1], sizeof(int), hipMemcpyDeviceToHost); //update number of points
}
/**
* prints a front located on device
*/
void printfront(float *d_front, int numPoints) {
printf("----------------------------------\n");
float *front = (float *) malloc(frontSize*sizeof(float));
hipMemcpy(front, d_front, frontSize*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < numPoints; i++) {
for (int j = 0; j < n; j++) {
printf("%1.1f ", front[i*pointSize+j]);
}
printf("\n");
}
printf("----------------------------------\n");
free(front);
}
/**
* @param d_in device array of values to sort (sorted in place)
* @param numElements number of elements to sort
*/
void parallelSort(float *d_in, int numElements) {
// set the config
CUDPPConfiguration config;
//config.op = CUDPP_MAX;
config.datatype = CUDPP_FLOAT;
config.algorithm = CUDPP_SORT_RADIX_GLOBAL;
config.options = CUDPP_OPTION_FORWARD;
// create the plan
CUDPPHandle sortPlan = 0;
CUDPPResult result = cudppPlan(&sortPlan, config, numElements, 1, 0);
// if not successful then exit
if (CUDPP_SUCCESS != result)
{
printf("Error creating CUDPPPlan\n");
exit(-1);
}
// allocate array for sorted results
float *d_out;
hipMalloc( (void **) &d_out, numElements*sizeof(float));
// Run the sort
cudppSort(sortPlan, d_out, d_in, numElements);
// TODO reassign pointers and remove costly memcpy operation
//d_in = d_out;
hipMemcpy(d_in, d_out, numElements*sizeof(float), hipMemcpyDeviceToDevice);
hipFree(d_out); // free the scratch buffer to avoid leaking device memory on every sort
// Destroy the plan
result = cudppDestroyPlan(sortPlan);
if (CUDPP_SUCCESS != result)
{
printf("Error destroying CUDPPPlan\n");
exit(-1);
}
// TODO reuse config and destroy plan at the end
}
__device__ int binarySearch(float *array, float value, int low, int high) {
while (low <= high) {
int mid = (low+high) / 2;
if (array[mid] > value)
high = mid-1;
else if (array[mid] < value)
low = mid+1;
else
return mid; //found
}
return -1; //not found
}
__global__ void arrange(float *d_out, float *d_in, float *lastObjectives, int objective, int pointSize) {
// conduct binary search on the last objectives
int location = binarySearch(lastObjectives, d_in[threadIdx.x*pointSize+objective], 0, blockDim.x);
// rearrange into a temporary array
for (int i = 0; i < pointSize; i++) {
d_out[location*pointSize+i] = d_in[threadIdx.x*pointSize+i];
}
}
__global__ void sort(float *lastObjectives, float *d_in, int i, int pointSize) {
lastObjectives[threadIdx.x] = d_in[threadIdx.x*pointSize+i];
}
void sortPoints(float *d_in, int numElements) {
float *lastObjectives;
hipMalloc( (void **) &lastObjectives, numElements*sizeof(float));
// sorts starting at the last objective
for (int i = n-1; i == n-1; i--) {
//for (int i = n-1; i >= 0; i--) {
// set the lastObjectives to be sorted
hipLaunchKernelGGL(( sort), dim3(1), dim3(numElements), 0, 0, lastObjectives, d_in, n-1, pointSize);
//sorts the lastObjectives
parallelSort(lastObjectives, numElements);
// allocate array for arranged results
float *d_out;
hipMalloc( (void **) &d_out, numElements*pointSize*sizeof(float));
//arrange the order according to the last objectives
hipLaunchKernelGGL(( arrange), dim3(1), dim3(numElements), 0, 0, d_out, d_in, lastObjectives, n-1, pointSize);
// copy d_out back to d_in
hipMemcpy(d_in, d_out, numElements*pointSize*sizeof(float), hipMemcpyDeviceToDevice);
hipFree(d_out); // free the per-iteration scratch buffer to avoid leaking device memory
}
hipFree(lastObjectives);
}
//////////////////////////////////////////////////////////
// HV CUDA
//////////////////////////////////////////////////////////
__host__ void set(float *d_ehvStack, int iteration) {
d_ehvStack[iteration] = 1;
}
void hvparallel() {
// sort the array
sortPoints(&d_frontsArray[frontSize*0], nPointsStack[0]); // sorts the points located in front[0], use nPointsStack[0] for the number of points
indexStack[0] = nPointsStack[0] - 1;
// TODO host cannot access device memory ehv, and hv and frontsArray, need CUDA kernels for this
// TODO d_frontsArray , d_hvStack and d_ehvStack is not possible
while (indexStack[0] >= 0) {
if (indexStack[iteration] < 0) {
iteration--;
ehvStack[iteration] -= hvStack[iteration+1];
hvStack[iteration] += d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
} else if (n == 2) {
iteration--;
ehvStack[iteration] -= d_frontsArray[frontSize*(iteration+1)+pointSize*0+0] * d_frontsArray[frontSize*(iteration+1)+pointSize*0+1];
for (int i = 1; i < nPointsStack[iteration+1]; i++) {
ehvStack[iteration] -= d_frontsArray[frontSize*(iteration+1)+pointSize*i+0] *
(d_frontsArray[frontSize*(iteration+1)+pointSize*i+1] - d_frontsArray[frontSize*(iteration+1)+pointSize*(i-1)+1]);
}
hvStack[iteration] += d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
} else {
n--;
// TODO not allowed: host writes to device memory cause a segmentation fault
d_ehvStack[iteration] = 1;
for (int i = 0; i < n; i++) {
ehvStack[iteration] *= d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+i];
}
if (indexStack[iteration] == nPointsStack[iteration] - 1) {
hvStack[iteration] = d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
} else {
iteration++;
makeDominatedBit();
sortPoints(&d_frontsArray[frontSize*iteration], nPointsStack[iteration]);
indexStack[iteration] = nPointsStack[iteration]-1;
}
}
}
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
//CUT_DEVICE_INIT(argc, argv);
// read the file
FILECONTENTS *f = readFile(argv[1]);
// start the timer
struct timeval tv1, tv2;
struct rusage ru_before, ru_after;
getrusage (RUSAGE_SELF, &ru_before);
int maxDimensions = 0; //the max number of dimensions in the fronts
int maxPoints = 0; //the max number of points in the fronts
// find the max number of Points, and the max number of Dimensions
for (int i = 0; i < f->nFronts; i++) {
if (f->fronts[i].nPoints > maxPoints)
maxPoints = f->fronts[i].nPoints;
if (f->fronts[i].n > maxDimensions)
maxDimensions = f->fronts[i].n;
}
/* allocate for cuda memory */
hipMalloc( (void **) &d_temp, maxPoints*maxDimensions*sizeof(float));
hipMalloc((void **) &d_scanoutput, (512)*sizeof(int));
hipMalloc((void **) &d_eliminated, (512)*sizeof(int));
hipMalloc( (void **) &d_front, maxPoints*maxDimensions*sizeof(float));
hipMalloc((void**) &d_temp2, sizeof(float)*maxPoints*maxDimensions);
// allocate cuda memory
frontSize = maxPoints*maxDimensions;
pointSize = maxDimensions;
hipMalloc((void **) &d_frontsArray, frontSize * maxDimensions * sizeof(float));
hipMalloc((void **) &d_ehvStack, sizeof(float) * maxDimensions);
hipMalloc((void **) &d_hvStack, sizeof(float) * maxDimensions);
// allocate cpu memory Stacks
indexStack = (int *) malloc(sizeof(int) * maxDimensions);
nPointsStack = (int *) malloc(sizeof(int) * maxDimensions);
// process each front to get the hypervolumes
for (int i = 0; i < f->nFronts; i++) {
// read each front
FRONT front = f->fronts[i];
n = front.n;
nPointsStack[0] = front.nPoints;
// CHECK UNIQUENESS OF OBJECTIVES
for (int x = n-1; x >= 0; x--) {
for (int j = 0; j < front.nPoints; j++) {
for (int k = 0; k < front.nPoints; k++) {
if (k == j) continue; //avoid checking against itself
if (front.points[j].objectives[x] == front.points[k].objectives[x]) {
fprintf(stderr, "data set is not unique in every objective\n");
printf("error!!!\n");
exit(EXIT_FAILURE);
}
}
}
}
// copy front to device memory
float h_front[front.nPoints*pointSize];
for (int j = 0; j < front.nPoints; j++) {
for (int k = 0; k < n; k++) {
h_front[j*pointSize+k] = front.points[j].objectives[k];
}
}
hipMemcpy(d_frontsArray, h_front, front.nPoints*pointSize*sizeof(float), hipMemcpyHostToDevice); // copy only what h_front actually holds (front.nPoints*pointSize floats)
// run hv parallel
hvparallel();
// copy back hvresult
float hvResult[1];
hipMemcpy(hvResult, &d_hvStack[0], sizeof(float), hipMemcpyDeviceToHost);
// print them out
printf("Calculating Hypervolume for Front:%d...\n", i+1);
printf("\t\t\t\t\t%1.10f\n", hvResult);
}
// stop timer
getrusage (RUSAGE_SELF, &ru_after);
tv1 = ru_before.ru_utime;
tv2 = ru_after.ru_utime;
printf("Average time = %fs\n", (tv2.tv_sec + tv2.tv_usec * 1e-6 - tv1.tv_sec - tv1.tv_usec * 1e-6) / f->nFronts);
// TODO free the storage
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err) );
exit(-1);
}
}
|
e90b5938a5abe456eaed5696d73579a89a74f95b.cu
|
/*
* Author: Henry Peng (20165483)
* University of Western Australia
* School of Computer Science and Software Engineering
* December 2010
*/
//#####################################
// Strategy for possible improvement:
//#####################################
// 1) Reduce Memcpy calls needed: Memcpy whole fr[iteration] to global variable ?
// 2) Reduce the number of variables necessary for the device (extra cudaMalloc-ing may reduce speed), e.g. remove the temp variable
// 3) Parallel prefix sum, reduce number of threads needed? Can it increase speed? Currently: 256 threads
// 4) Reduce memory required for the eliminated array and the scan results array. Currently fixed at 512 points for each; not scalable.
// 5) Reduce the size of memory allocated for each cuda variable.
// 6) Make into a single kernel function with many device functions and a single cudaThreadSynchronize (implement scan_best in device mode)
// 7) Look into using shared memory
//######################################
/////////////////////////////////////////////////////////
// Includes and Defines
/////////////////////////////////////////////////////////
//#define CUDPP_STATIC_LIB
#include "/usr/local/NVIDIA_GPU_Computing_SDK/C/common/inc/cudpp/cudpp.h"
#include "read.c"
#include "scan_best_kernel.cu"
#include <stdbool.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#include "cutil.h"
//#include "radixsort.cu"
//extern float ehv(int index, FRONT front);
//extern float hypervolume(FRONT front);
#define MIN(x, y) ((x < y) ? (x) : (y))
unsigned int frontSize;
unsigned int pointSize;
/////////////////////////////////////////////////////////
// Global Variables
/////////////////////////////////////////////////////////
//int maxDepth = -1; //the maximum depth you have reached
int n = 0; //the dimension of the current front we are working on
int iteration = 0; //depth of the recursion starting from 0
float hypervolume(FRONT);
FRONT *fr; //storage for storing array of sprimed/non-dominating fronts as we go deeper into the recursion
/* Device global variables */
float *d_temp;
float *d_front;
int *d_eliminated;
int *d_scanoutput;
float *d_temp2;
// unused anymore
FRONT *frontsArray;
float *ehvStack;
float *hvStack;
// cpu memory stacks
int *indexStack;
int *nPointsStack;
// cuda memory stacks
float *d_frontsArray;
float *d_hvStack;
float *d_ehvStack;
//Note: n is needed for slicing and sorting, iteration is needed for saving the array of fronts when going deeper into the recursion
/////////////////////////////////////////////////////////
// GPU Kernel Functions
/////////////////////////////////////////////////////////
/**
* Sprimes a front in parallel.
*/
__global__ void sprimeFront(float *frPoints_device, float *frontPoints_device, int index) {
frPoints_device[blockIdx.x*blockDim.x+threadIdx.x] = MIN(frontPoints_device[index*blockDim.x+threadIdx.x],
frontPoints_device[(blockIdx.x+1+index)*blockDim.x+threadIdx.x]);
}
/**
* Device Function: Determine domination status of point A and B.
* Similar to CPU implementation.
*
* returns 1 if point b dominates a
* zero if non-dominating
* returns -1 if point a dominates b
* returns 2 if point is equal
*/
__device__ int dominated(float *point_a, float *point_b, int nDim) {
int result = 2;
for (int i = 0; i < nDim; i++) {
if (point_a[i] > point_b[i]) {
if (result != 1) result = -1; else return 0;
}
if (point_b[i] > point_a[i]) {
if (result != -1) result = 1; else return 0;
}
}
return result;
}
/**
* Computes eliminated array in parallel.
* Flag = 0 indicates eliminated, flag = 1 is kept.
* e.g. result of a front with 5 points: [0, 1, 1, 0, 1].
* (known "trivial" bug: equal points will not be eliminated).
*/
__global__ void computeEliminatedArray(float *d_fr_iteration, int nDim, int *eliminated) {
__shared__ int flag;
flag = 1;
__syncthreads();
if (dominated(&d_fr_iteration[blockIdx.x*nDim] , &d_fr_iteration[threadIdx.x*nDim], nDim) == 1)
flag = 0;
__syncthreads();
//if (threadIdx.x==0) {
eliminated[blockIdx.x] = flag;
//}
}
/**
* Insert the results and reorder into temp array in parallel.
*/
__global__ void insertResults(float *d_fr_iteration, float *temp, int *eliminated, int *scanoutput) {
if (eliminated[blockIdx.x] == 1) {
//insert the non-dominated points
temp[(scanoutput[blockIdx.x]-1)*blockDim.x+threadIdx.x] = d_fr_iteration[blockIdx.x*blockDim.x+threadIdx.x];
} /*else {
//if eliminated insert at the end of the temp array.
temp[(gridDim.x-1-(blockIdx.x-scanoutput[blockIdx.x]))*blockDim.x+threadIdx.x] = d_fr_iteration[blockIdx.x*blockDim.x+threadIdx.x];
}*/
}
/**
* Create an inclusive scan output from exclusive scan output.
* Shift array left, and insert the sum of last element of scan and
* last element of input array, at the end of the sum.
*/
__global__ void scan_inclusive(int *d_scanbest, int *d_eliminated, int nPoints) {
if (threadIdx.x > 0)
d_scanbest[threadIdx.x-1] = d_scanbest[threadIdx.x];
//__syncthreads();
//if (threadIdx.x == nPoints-1) {
if (nPoints == 1) {
d_scanbest[nPoints-1] = d_eliminated[nPoints-1];
} else {
d_scanbest[nPoints-1] = d_scanbest[nPoints-2] + d_eliminated[nPoints-1];
}
//}
}
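/*
 * Host-side reference sketch (added for clarity; not called by the pipeline):
 * this mirrors what scan_inclusive does to the exclusive-scan result produced
 * by scan_best -- shift it left by one and append the inclusive total. The
 * function name and signature are illustrative only.
 */
void exclusiveToInclusiveReference(int *scan, const int *flags, int nPoints) {
for (int i = 1; i < nPoints; i++)
scan[i - 1] = scan[i]; // shift the exclusive results left by one
if (nPoints == 1)
scan[0] = flags[0];
else
scan[nPoints - 1] = scan[nPoints - 2] + flags[nPoints - 1]; // inclusive total of all flags
}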
/////////////////////////////////////////////////////////
// Helper methods
/////////////////////////////////////////////////////////
/**
* Prefix-sum sequential on CPU. (Deprecated)
*/
int *sequentialScan(int *eliminated, int n)
{
int *output = (int *) malloc(sizeof(int) *n);
output[0] = eliminated[0];
for (int i = 1; i < n; i++) {
output[i] = output[i-1] + eliminated[i];
}
return output;
}
/**
* Prints a front.
*/
void printfront(FRONT front) {
for (int j = 0; j < front.nPoints; j++)
{
printf("\t");
for (int k = 0; k < n; k++)
{
printf("%f ",front.points[j].objectives[k]);
}
printf("\n");
}
}
/////////////////////////////////////////////////////////
// CPU Functions
/////////////////////////////////////////////////////////
/**
* Determine domination status of point A and B.
*
* returns 1 if point b dominates a
* zero if non-dominating
* returns -1 if point a dominates b
* returns 2 if point is equal
*/
int dominated(POINT a, POINT b) {
int result = 2;
for (int i = 0; i < n; i++) {
if (a.objectives[i] > b.objectives[i]) {
if (result != 1) result = -1; else return 0;
}
if (b.objectives[i] > a.objectives[i]) {
if (result != -1) result = 1; else return 0;
}
}
return result;
}
/**
* compare function for qsort sorting front in the last objective, i.e. increasing from top to bottom
* and we process hypervolumes from the bottom
*/
int compare (const void *a, const void *b)
{
//n == maxDimensions-iteration
for (int i = n - 1; i >= 0; i--) {
if (((*(POINT *)a).objectives[i] > (*(POINT *)b).objectives[i])) return 1;
if (((*(POINT *)a).objectives[i] < (*(POINT *)b).objectives[i])) return -1;
}
return 0;
}
/**
* Returns a sprimed & non-dominating front relative to point p at index.
*/
void limitset(int index, FRONT front) {
int z = front.nPoints-1-index;
/* <1> copy front to device memory */
double h_front[front.nPoints*n];
for (int i = 0; i < front.nPoints; i++) {
for (int j = 0; j < n; j++) {
h_front[i*n+j] = front.points[i].objectives[j];
}
}
cudaMemcpy(d_front, h_front, front.nPoints*n*sizeof(double), cudaMemcpyHostToDevice);
/* <2> Sprimes the front */
sprimeFront<<< z, n >>>( d_temp, d_front, index);
cudaThreadSynchronize(); // block until the device has completed
/* <3> Compute eliminated array */
computeEliminatedArray<<< z, z >>>(d_temp, n, d_eliminated);
cudaThreadSynchronize();
int N = z;
int blockSize = 512;
int nBlocks = N/blockSize + (N%blockSize == 0 ? 0:1);
/* <4> Compute prefix-sum in parallel */
scan_best<<< nBlocks, blockSize/2, sizeof(int)*(blockSize) >>>(d_scanoutput, d_eliminated, blockSize);
cudaThreadSynchronize();
scan_inclusive<<< 1, z >>>(d_scanoutput, d_eliminated, z); //make the result into an inclusive scan result.
cudaThreadSynchronize();
/* <5> Insert the results into temp buffer */
insertResults<<<z,n>>> (d_temp, d_temp2, d_eliminated, d_scanoutput);
cudaThreadSynchronize();
/* <6> Copy final results from device buffer back to host */
cudaMemcpy(h_front, d_temp2, z*n*sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < z; i++) {
for (int j = 0; j < n; j++) {
fr[iteration].points[i].objectives[j] = h_front[i*n+j];
}
}
/* <7> Update number of points */
cudaMemcpy(&fr[iteration].nPoints, &d_scanoutput[z-1], sizeof(int), cudaMemcpyDeviceToHost); //update number of points
}
/**
* Returns the size of exclusive hypervolume of point p at index relative to a front set.
*/
float ehv(int index, FRONT front) {
//hypervolume of a single point
float ehv = 1;
for (int i = 0; i < n; i++) {
ehv *= front.points[index].objectives[i];
}
//if not the last point, then go deeper into the recursion
if (index < front.nPoints-1) {
limitset(index, front); //limit the front relative to index.
iteration++; //slicing
ehv -= hypervolume(fr[iteration-1]); //subtract the hypervolume of the limit set from ehv.
iteration--;
}
return ehv;
}
/**
* Returns the size of hypervolume of a front.
*/
float hypervolume(FRONT front) {
//sort the front with qsort
qsort(front.points, front.nPoints, sizeof (POINT), compare);
//calculate for base case = 2D
if (n==2) {
float vol2d = (front.points[0].objectives[0] * front.points[0].objectives[1]);
for (int i = 1; i < front.nPoints; i++) {
vol2d += (front.points[i].objectives[0]) *
(front.points[i].objectives[1] - front.points[i - 1].objectives[1]);
}
return vol2d;
}
float sumhv = 0;
n--;
//sum all the segments
for (int i = front.nPoints - 1; i >= 0; i--)
//for (int i = 0; i < front.nPoints; i++) //annoying bug that causes inaccurate results
sumhv += front.points[i].objectives[n] * ehv(i, front);
n++;
return sumhv;
}
// creates the front frontsArray[fr-1].points[indexStack[fr-1]+1 ..] in frontsArray[fr],
// with each point bounded by frontsArray[fr-1].points[indexStack[fr-1]]
// and with dominated points removed
void makeDominatedBit()
{
int z = frontsArray[iteration-1].nPoints - 1 - indexStack[iteration-1];
for (int i = 0; i < z; i++) {
for (int j = 0; j < n; j++) {
frontsArray[iteration].points[i].objectives[j] = MIN(frontsArray[iteration-1].points[indexStack[iteration-1]].objectives[j],
frontsArray[iteration-1].points[indexStack[iteration-1] + 1 + i].objectives[j]);
}
}
POINT t; // have to do proper swaps because of the reuse of the memory hierarchy
frontsArray[iteration].nPoints = 1;
for (int i = 1; i < z; i++) {
int j = 0;
bool keep = true;
while (j < frontsArray[iteration].nPoints && keep) {
switch (dominated(frontsArray[iteration].points[i], frontsArray[iteration].points[j])) {
case -1:
t = frontsArray[iteration].points[j];
frontsArray[iteration].points[j] = frontsArray[iteration].points[frontsArray[iteration].nPoints - 1];
frontsArray[iteration].points[frontsArray[iteration].nPoints - 1] = t;
frontsArray[iteration].nPoints--;
break;
case 0:
j++;
break;
// case 2: printf("Identical points!\n");
default:
keep = false;
}
}
if (keep) {
t = frontsArray[iteration].points[frontsArray[iteration].nPoints];
frontsArray[iteration].points[frontsArray[iteration].nPoints] = frontsArray[iteration].points[i];
frontsArray[iteration].points[i] = t;
frontsArray[iteration].nPoints++;
}
}
}
void hvnew() {
// sets hvStack[0] to the hypervolume of frontsArray[0][0 ..]
qsort(frontsArray[0].points, frontsArray[0].nPoints, sizeof(POINT), compare);
indexStack[0] = frontsArray[0].nPoints - 1;
while (indexStack[0] >= 0) { // there are jobs remaining
if (indexStack[iteration] < 0) { // we've finished the jobs at this level: i.e. completed all ehv calculation (HV is complete for that level!)
iteration--;
// compute the single point ehv excluding the last objective
ehvStack[iteration] -= hvStack[iteration+1];
// add the ehv multiplied by the last objective left out due to n--, to the hv stack
hvStack[iteration] += (frontsArray[iteration].points[indexStack[iteration]].objectives[n]) * ehvStack[iteration];
// 1 job is finished for the previous iteration
indexStack[iteration]--;
// finished with next level ehv
n++;
} else if (n == 2) { // do this job using the linear algorithm
//TODO make this work
/*if (indexStack[0] == 0) { //or iteration== 0
hvStack[0] = frontsArray[0].points[0].objectives[0] * frontsArray[0].points[0].objectives[1];
for (int i = 1; i < frontsArray[0].nPoints; i++) {
hvStack[0] += (frontsArray[0].points[i].objectives[0]) *
(frontsArray[0].points[i].objectives[1] - frontsArray[0].points[i - 1].objectives[1]);
}
indexStack[0]--;
n++;
} else {*/
iteration--;
ehvStack[iteration] -= frontsArray[iteration+1].points[0].objectives[0] * frontsArray[iteration+1].points[0].objectives[1];
for (int i = 1; i < frontsArray[iteration+1].nPoints; i++) {
ehvStack[iteration] -= (frontsArray[iteration+1].points[i].objectives[0]) * (frontsArray[iteration+1].points[i].objectives[1] - frontsArray[iteration+1].points[i-1].objectives[1]);
}
hvStack[iteration] += frontsArray[iteration].points[indexStack[iteration]].objectives[n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
//}
} else { // we need to "recurse"
n--;
ehvStack[iteration] = 1;
for (int i = 0; i < n; i++) {
//compute the single point ehv excluding the last objective
ehvStack[iteration] *= frontsArray[iteration].points[indexStack[iteration]].objectives[i];
}
if (indexStack[iteration] == frontsArray[iteration].nPoints - 1) { // first job at this level: set will be empty = no need to recurse
// add the first ehv multiplied by the last objective left out due to n--, to the hv stack
hvStack[iteration] = frontsArray[iteration].points[indexStack[iteration]].objectives[n] * ehvStack[iteration];
indexStack[iteration]--;
// finished with first level ehv (index = nPoints-1), now need to calculate the levels until reach (index = 0)
n++;
} else { // set will be non-empty: create a new job
//go to next level recursion
iteration++;
makeDominatedBit();
qsort(frontsArray[iteration].points, frontsArray[iteration].nPoints, sizeof(POINT), compare);
//reset index stack to the number of points-1
indexStack[iteration] = frontsArray[iteration].nPoints - 1;
}
}
}
}
/**
* Timer Functions
*/
void run(int argc, char *argv[])
{
unsigned int timer = 0;
CUT_DEVICE_INIT(argc, argv);
/////////////////////////////////////////////////////////////////////
// Create and start a timer called "timer"
// Calls to create and start timers are enveloped in the CUT_SAFE_CALL
// This CUDA Utility Tool checks for errors upon return.
// If an error is found, it prints out an error message, file name,
// and line number in file where the error can be found
/////////////////////////////////////////////////////////////////////
timer = 0;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
// Stop the timer
CUT_SAFE_CALL(cutStopTimer(timer));
printf( "Processing time: %f (ms)\n", cutGetTimerValue(timer));
// Delete the timer
CUT_SAFE_CALL(cutDeleteTimer(timer));
}
/**
* Runs a parallel hypervolume
*/
__global__ void hvparallellol() {
// Should call many device functions
//sortParallel();
//d_indexStack[0] = d_frontsArray[0].nPoints - 1;
}
/**
* Runs a parallel hypervolume
*/
//void hvparallel() {
/*int blockSize = 100;
int nBlocks = N/blockSize + (N%blockSize == 0 ? 0:1);
// where N is the parallel threads required
global<<<nBlocks, blockSize>>> ( param , N );
cudaThreadSynchronize();
checkCUDAError("HV parallel failed!");*/
//}
////////////////////////////////////////////////////////////////
// Start of CUDA CODE
////////////////////////////////////////////////////////////////
/**
* Returns a sprimed & non-dominating front relative to point p at index.
*/
void limitset() {
// TODO make this a kernel which calls many device functions
// TODO kernels may need to be passed the number of points in the front (from nPointsStack)
// TODO &d_frontsArray[frontSize*iteration] may need to be changed to d_frontsArray+frontSize*iteration
// sets the number of points in sprimed front
int z = nPointsStack[iteration-1] - 1 - indexStack[iteration-1];
// sprimes the front and store it into temporary storage
sprimeFront<<< z, n >>>( d_temp, d_frontsArray, indexStack[iteration-1]);
cudaThreadSynchronize();
// compute eliminated array and store it in d_eliminated
computeEliminatedArray<<< z, z >>>(d_temp, n, d_eliminated);
cudaThreadSynchronize();
// compute parallel prefix sum and store the result in d_scanoutput
// TODO may need to make use of cudpp for this
scan_best<<< 256, 512/2, sizeof(int)*(512) >>>(d_scanoutput, d_eliminated, 512);
cudaThreadSynchronize();
scan_inclusive<<< 1, z >>>(d_scanoutput, d_eliminated, z); //make the result into an inclusive scan result.
cudaThreadSynchronize();
// compute the results and store it in frontArray
insertResults<<<z,n>>> (d_temp, &d_frontsArray[frontSize*iteration], d_eliminated, d_scanoutput);
cudaThreadSynchronize();
// update number of points to the host
cudaMemcpy(&nPointsStack[iteration], &d_scanoutput[z-1], sizeof(int), cudaMemcpyDeviceToHost); //update number of points
}
/**
* prints a front located on device
*/
void printfront(float *d_front, int numPoints) {
printf("----------------------------------\n");
float *front = (float *) malloc(frontSize*sizeof(float));
cudaMemcpy(front, d_front, frontSize*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < numPoints; i++) {
for (int j = 0; j < n; j++) {
printf("%1.1f ", front[i*pointSize+j]);
}
printf("\n");
}
printf("----------------------------------\n");
free(front);
}
/**
* @param d_in device array of values to sort (sorted in place)
* @param numElements number of elements to sort
*/
void parallelSort(float *d_in, int numElements) {
// set the config
CUDPPConfiguration config;
//config.op = CUDPP_MAX;
config.datatype = CUDPP_FLOAT;
config.algorithm = CUDPP_SORT_RADIX_GLOBAL;
config.options = CUDPP_OPTION_FORWARD;
// create the plan
CUDPPHandle sortPlan = 0;
CUDPPResult result = cudppPlan(&sortPlan, config, numElements, 1, 0);
// if not successful then exit
if (CUDPP_SUCCESS != result)
{
printf("Error creating CUDPPPlan\n");
exit(-1);
}
// allocate array for sorted results
float *d_out;
cudaMalloc( (void **) &d_out, numElements*sizeof(float));
// Run the sort
cudppSort(sortPlan, d_out, d_in, numElements);
// TODO reassign pointers and remove costly memcpy operation
//d_in = d_out;
cudaMemcpy(d_in, d_out, numElements*sizeof(float), cudaMemcpyDeviceToDevice);
cudaFree(d_out); // free the scratch buffer to avoid leaking device memory on every sort
// Destroy the plan
result = cudppDestroyPlan(sortPlan);
if (CUDPP_SUCCESS != result)
{
printf("Error destroying CUDPPPlan\n");
exit(-1);
}
// TODO reuse config and destroy plan at the end
}
__device__ int binarySearch(float *array, float value, int low, int high) {
while (low <= high) {
int mid = (low+high) / 2;
if (array[mid] > value)
high = mid-1;
else if (array[mid] < value)
low = mid+1;
else
return mid; //found
}
return -1; //not found
}
__global__ void arrange(float *d_out, float *d_in, float *lastObjectives, int objective, int pointSize) {
// conduct binary search on the last objectives
int location = binarySearch(lastObjectives, d_in[threadIdx.x*pointSize+objective], 0, blockDim.x);
// rearrange into a temporary array
for (int i = 0; i < pointSize; i++) {
d_out[location*pointSize+i] = d_in[threadIdx.x*pointSize+i];
}
}
__global__ void sort(float *lastObjectives, float *d_in, int i, int pointSize) {
lastObjectives[threadIdx.x] = d_in[threadIdx.x*pointSize+i];
}
void sortPoints(float *d_in, int numElements) {
float *lastObjectives;
cudaMalloc( (void **) &lastObjectives, numElements*sizeof(float));
// sorts starting at the last objective
for (int i = n-1; i == n-1; i--) {
//for (int i = n-1; i >= 0; i--) {
// set the lastObjectives to be sorted
sort<<<1, numElements>>>(lastObjectives, d_in, n-1, pointSize);
//sorts the lastObjectives
parallelSort(lastObjectives, numElements);
// allocate array for arranged results
float *d_out;
cudaMalloc( (void **) &d_out, numElements*pointSize*sizeof(float));
//arrange the order according to the last objectives
arrange<<< 1, numElements>>>(d_out, d_in, lastObjectives, n-1, pointSize);
// copy d_out back to d_in
cudaMemcpy(d_in, d_out, numElements*pointSize*sizeof(float), cudaMemcpyDeviceToDevice);
cudaFree(d_out); // free the per-iteration scratch buffer to avoid leaking device memory
}
cudaFree(lastObjectives);
}
//////////////////////////////////////////////////////////
// HV CUDA
//////////////////////////////////////////////////////////
__host__ void set(float *d_ehvStack, int iteration) {
d_ehvStack[iteration] = 1;
}
void hvparallel() {
// sort the array
sortPoints(&d_frontsArray[frontSize*0], nPointsStack[0]); // sorts the points located in front[0], use nPointsStack[0] for the number of points
indexStack[0] = nPointsStack[0] - 1;
// TODO host cannot access device memory ehv, and hv and frontsArray, need CUDA kernels for this
// TODO d_frontsArray , d_hvStack and d_ehvStack is not possible
while (indexStack[0] >= 0) {
if (indexStack[iteration] < 0) {
iteration--;
ehvStack[iteration] -= hvStack[iteration+1];
hvStack[iteration] += d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
} else if (n == 2) {
iteration--;
ehvStack[iteration] -= d_frontsArray[frontSize*(iteration+1)+pointSize*0+0] * d_frontsArray[frontSize*(iteration+1)+pointSize*0+1];
for (int i = 1; i < nPointsStack[iteration+1]; i++) {
ehvStack[iteration] -= d_frontsArray[frontSize*(iteration+1)+pointSize*i+0] *
(d_frontsArray[frontSize*(iteration+1)+pointSize*i+1] - d_frontsArray[frontSize*(iteration+1)+pointSize*(i-1)+1]);
}
hvStack[iteration] += d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
} else {
n--;
// TODO not allowed: host writes to device memory cause a segmentation fault
d_ehvStack[iteration] = 1;
for (int i = 0; i < n; i++) {
ehvStack[iteration] *= d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+i];
}
if (indexStack[iteration] == nPointsStack[iteration] - 1) {
hvStack[iteration] = d_frontsArray[frontSize*iteration+pointSize*indexStack[iteration]+n] * ehvStack[iteration];
indexStack[iteration]--;
n++;
} else {
iteration++;
makeDominatedBit();
sortPoints(&d_frontsArray[frontSize*iteration], nPointsStack[iteration]);
indexStack[iteration] = nPointsStack[iteration]-1;
}
}
}
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
//CUT_DEVICE_INIT(argc, argv);
// read the file
FILECONTENTS *f = readFile(argv[1]);
// start the timer
struct timeval tv1, tv2;
struct rusage ru_before, ru_after;
getrusage (RUSAGE_SELF, &ru_before);
int maxDimensions = 0; //the max number of dimensions in the fronts
int maxPoints = 0; //the max number of points in the fronts
// find the max number of Points, and the max number of Dimensions
for (int i = 0; i < f->nFronts; i++) {
if (f->fronts[i].nPoints > maxPoints)
maxPoints = f->fronts[i].nPoints;
if (f->fronts[i].n > maxDimensions)
maxDimensions = f->fronts[i].n;
}
/* allocate for cuda memory */
cudaMalloc( (void **) &d_temp, maxPoints*maxDimensions*sizeof(float));
cudaMalloc((void **) &d_scanoutput, (512)*sizeof(int));
cudaMalloc((void **) &d_eliminated, (512)*sizeof(int));
cudaMalloc( (void **) &d_front, maxPoints*maxDimensions*sizeof(float));
cudaMalloc((void**) &d_temp2, sizeof(float)*maxPoints*maxDimensions);
// allocate cuda memory
frontSize = maxPoints*maxDimensions;
pointSize = maxDimensions;
cudaMalloc((void **) &d_frontsArray, frontSize * maxDimensions * sizeof(float));
cudaMalloc((void **) &d_ehvStack, sizeof(float) * maxDimensions);
cudaMalloc((void **) &d_hvStack, sizeof(float) * maxDimensions);
// allocate cpu memory Stacks
indexStack = (int *) malloc(sizeof(int) * maxDimensions);
nPointsStack = (int *) malloc(sizeof(int) * maxDimensions);
// process each front to get the hypervolumes
for (int i = 0; i < f->nFronts; i++) {
// read each front
FRONT front = f->fronts[i];
n = front.n;
nPointsStack[0] = front.nPoints;
// CHECK UNIQUENESS OF OBJECTIVES
for (int x = n-1; x >= 0; x--) {
for (int j = 0; j < front.nPoints; j++) {
for (int k = 0; k < front.nPoints; k++) {
if (k == j) continue; //avoid checking against itself
if (front.points[j].objectives[x] == front.points[k].objectives[x]) {
fprintf(stderr, "data set is not unique in every objective\n");
printf("error!!!\n");
exit(EXIT_FAILURE);
}
}
}
}
// copy front to device memory
float h_front[front.nPoints*pointSize];
for (int j = 0; j < front.nPoints; j++) {
for (int k = 0; k < n; k++) {
h_front[j*pointSize+k] = front.points[j].objectives[k];
}
}
cudaMemcpy(d_frontsArray, h_front, front.nPoints*pointSize*sizeof(float), cudaMemcpyHostToDevice); // copy only what h_front actually holds (front.nPoints*pointSize floats)
// run hv parallel
hvparallel();
// copy back hvresult
float hvResult[1];
cudaMemcpy(hvResult, &d_hvStack[0], sizeof(float), cudaMemcpyDeviceToHost);
// print them out
printf("Calculating Hypervolume for Front:%d...\n", i+1);
printf("\t\t\t\t\t%1.10f\n", hvResult);
}
// stop timer
getrusage (RUSAGE_SELF, &ru_after);
tv1 = ru_before.ru_utime;
tv2 = ru_after.ru_utime;
printf("Average time = %fs\n", (tv2.tv_sec + tv2.tv_usec * 1e-6 - tv1.tv_sec - tv1.tv_usec * 1e-6) / f->nFronts);
// TODO free the storage
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err) );
exit(-1);
}
}
|
dc04316d88ad8f4e1e4c4ff475f89558d98196bf.hip
|
// !!! This is a file automatically generated by hipify!!!
//P2P Synchronization using events and separate streams. Coupling/overlapping several exchanges together.
#include <omp.h>
#include "tinyxml.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi2D_Decom.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
#include <fstream>
#include <hip/hip_vector_types.h>
#define IMUL(a,b) __mul24(a,b)
#define DIVRND(a,b) ((a+b-1)/b)
#define BLOCKSIZE_X 32
#define BLOCKSIZE_Y 16
using namespace std;
using namespace std::chrono;
//hipError_t performMultiGPUJacobi();
//Support for compilers below C++14 on *nix (std::make_unique is only standard from C++14)
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
struct create_Device
{
int deviceID;
//In a GPU topology set the GPU position
int devicePosition_X;
int devicePosition_Y;
int devicePosition_Z;
vector<float> eHalo;
vector<float> wHalo;
vector<float> nHalo;
vector<float> sHalo;
//Flags check the halos needed by the device
int eHalo_flag = 0;
int wHalo_flag = 0;
int nHalo_flag = 0;
int sHalo_flag = 0;
};
//Simple Jacobi iteration
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom, int2 dim)
{
int2 pos = make_int2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y
);
int index = (pos.y * dim.x) + pos.x;
//int index = threadIdx.x + blockDim.x * blockIdx.x;
float result = rhs[index];
int dim_x = dim.x;
int dim_y = dim.y;
//x_pos and y_pos are just to clarify the thread layout; they can be renamed to any suitable variable names
int x_pos = pos.y;
int y_pos = pos.x;
//result = nhalo[y_pos];
//x_out[index] = result;
//Get the boundaries
int leftBoundaryElem = x_pos * (dim_x);
int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
int bottomBoundaryElem = y_pos;
/*if((deviceID==2)&&(index==leftBoundaryElem))
{
printf("For Device %d index is : %d\n", deviceID, index);
printf("For Device %d leftBoundaryElem is : %d\n", deviceID, leftBoundaryElem);
printf("rightBoundaryElem is : %d\n", rightBoundaryElem);
printf("topBoundaryElem is : %d\n", topBoundaryElem);
printf("bottomBoundaryElem is : %d\n", bottomBoundaryElem);
}*/
//Halo computation for 1D Decomposition: the first and last GPUs do not need halos on both sides (the first uses only nhalo, the last only shalo)
if (domain_Decom == 1)
{
if (numDevices > 1)
{
//First GPU
if (deviceID == 0) {
//We need to use nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
//Last GPU
else if (deviceID == (numDevices - 1)) {
//We need to use shalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
return;
}
//For all the middle GPUs
else
{
//We need to use both shalos and nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
}
}
else if (domain_Decom == 2) {
//======Left Boundary Elem
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
//Computation using the Halos
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
result -= A1[index] * whalo[x_pos];
}
}
//======Right Boundary Elem
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
result -= A3[index] * ehalo[x_pos];
}
}
//======Bottom Boundary Elem
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
result -= A0[index] * shalo[y_pos];
}
}
//======Top Boundary Elem
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
result -= A4[index] * nhalo[y_pos];
}
}
result /= A2[index];
x_out[index] = result;
//Updating Halos at the End of the computation
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
nhalo[y_pos] = result;
}
}
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
shalo[y_pos] = result;
}
}
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
whalo[x_pos] = result;
}
}
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
ehalo[x_pos] = result;
}
}
return;
}
//For computations on a Machine with a single GPU
else
{
{//For some reason order of computation (left,right,top and bottom) gives a different result
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
return;
}
}
}
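/*
 * Illustrative launch sketch only (the actual launch is set up later in
 * performMultiGPUJacobi and may differ): for one device's chunk_X x chunk_Y
 * sub-domain the kernel would typically be invoked along these lines, where
 * the d_* pointers stand for that device's buffers:
 *
 * dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y);
 * dim3 grid(DIVRND(chunk_X, BLOCKSIZE_X), DIVRND(chunk_Y, BLOCKSIZE_Y));
 * int2 chunkDim = make_int2(chunk_X, chunk_Y);
 * hipLaunchKernelGGL(jacobi_Simple, grid, block, 0, 0,
 * d_A0, d_A1, d_A2, d_A3, d_A4, d_x_in, d_x_out, d_rhs,
 * eHalo_flag, wHalo_flag, nHalo_flag, sHalo_flag,
 * d_ehalo, d_whalo, d_nhalo, d_shalo,
 * deviceID, numDevices, domain_Decom, chunkDim);
 */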
//========================MultiGPU utility functions============================================================================
// load the named file and dump its structure to STDOUT
void getConfiguration(const char* pFilename, int &numDevices, int &domain_decom)
{
TiXmlDocument doc(pFilename);
bool loadOkay = doc.LoadFile();
if (loadOkay)
{
cout <<"\nFile Loaded successfully\n" ;
TiXmlElement *pRoot = doc.RootElement();
TiXmlElement *element = pRoot->FirstChildElement();
while (element)
{
string elementName = element->Value();
string attribute = element->Attribute("name"); //Gets the "name" attribute of the element
string value = element->GetText();
cout << "\n The attribute is "<<attribute;
cout << "\n The elementName is " << elementName;
cout << "\n The element Value is " << value;
if (attribute=="numDevices") {
numDevices = stoi(value);
}
if (attribute == "decomposition") {
domain_decom = stoi(value);
}
element = element->NextSiblingElement();
}
}
else
{
cout << "\nCould not load config file\n";
}
}
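/*
 * A hypothetical config file shape this parser would accept (file, element and
 * attribute values below are made up; the loop above only relies on the "name"
 * attribute and the element text):
 *
 * <config>
 * <param name="numDevices">4</param>
 * <param name="decomposition">2</param>
 * </config>
 */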
void checkP2Paccess(int numGPUs)
{
for (int i = 0; i<numGPUs; i++)
{
hipSetDevice(i);
for (int j = 0; j<numGPUs; j++)
{
int access;
if (i != j)
{
hipDeviceCanAccessPeer(&access, i, j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed : " << hipGetErrorString(err) << endl;
return;
}
}
}
}
cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n";
}
bool enableP2P(int numGPUs)
{
for (int i = 0; i<numGPUs; i++)
{
hipSetDevice(i);
for (int j = 0; j<numGPUs; j++)
{
int access;
hipDeviceCanAccessPeer(&access, i, j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl;
return false;
}
if (access)
{
hipDeviceEnablePeerAccess(j, 0);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl;
return false;
}
}
}
}
return true;
}
void disableP2P(int numGPUs)
{
for (int i = 0; i<numGPUs; i++)
{
hipSetDevice(i);
for (int j = 0; j<numGPUs; j++)
{
int access;
hipDeviceCanAccessPeer(&access, i, j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while disabling : " << hipGetErrorString(err) << endl;
return;
}
if (access)
{
hipDeviceDisablePeerAccess(j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while disabling: " << hipGetErrorString(err) << endl;
return;
}
}
}
}
}
void performFactorPairing(int numDevices, int &fact_x, int &fact_y)
{
int i;
//Check if numDevices is Prime
bool isPrime = true;
for (i = 2; i < numDevices / 2; ++i)
{
if (numDevices % i == 0)
{
isPrime = false;
break;
}
}
if (isPrime)
{
fact_x = numDevices;
fact_y = 1;
}
else
{
//Finding the appropriate factor pairs to divide the grid
for (i = 2; i < numDevices / 2; ++i)
{
if (numDevices % i == 0) {
fact_x = i;
fact_y = numDevices / i;
}
}
}
}
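/*
 * Quick hand-checked examples of the pairing above: the loop keeps the last
 * factor pair it finds, e.g. numDevices = 6 -> fact_x = 2, fact_y = 3,
 * numDevices = 12 -> 4 x 3, while a prime count such as 5 stays 5 x 1.
 */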
//===============================================================================================================================
//====================================Creating Topology with the number of Devices available====================================
void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y, int domainDecomType)
{
//Finding GPU topology along x and y
//Assuming the total number of devices is a perfect square (to be changed later)
if(domainDecomType==1)
{
numberOfDevicesAlong_X = numDevices;
numberOfDevicesAlong_Y = 1;
}
else
{
int val = -1;
val = (int)sqrt(numDevices);
if ((val*val) == numDevices)
{
numberOfDevicesAlong_X = val;
numberOfDevicesAlong_Y = val;
}
else
{
int fact_x = 1;
int fact_y = 1;
performFactorPairing(numDevices, fact_x, fact_y);
numberOfDevicesAlong_X = fact_x;
numberOfDevicesAlong_Y = fact_y;
}
}
}
/* Creates a topology for a number of devices in a system
for ex. the devices are aware of their left, right, top and bottom neighbours in 2D
1. It also decides the chunk per device by determining the x- and y-dimensions of the data chunk per device.
2. It also initializes halos for each device which can be exchanged with its neighbours
*/
void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y)
{
deviceArray.resize(numDevices);
unsigned int deviceCount = 0;
for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) {
for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) {
deviceArray[deviceCount].deviceID = deviceCount;
deviceArray[deviceCount].devicePosition_X = gridCount_X;
deviceArray[deviceCount].devicePosition_Y = gridCount_Y;
//devicePosition_Z to be changed later
deviceArray[deviceCount].devicePosition_Z = 1;
deviceCount++;
}
}
}
//==============================================================================================================================
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halo need to be initialized and computed
//TODO:Create a Halo Exchange Mechanism for 2D Multi GPU topology
void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim)
{
/*cout << endl << "Inside Halo Computation 2D. printing Details";
cout << endl << "Device ID " << device.deviceID;
cout << endl << "Device position X " << device.devicePosition_X;
cout << endl << "Device position Y " << device.devicePosition_Y;
cout << endl << "Row Start " << rowStartPos;
cout << endl << "Row End " << rowEndPos;*/
//Assign a separate counter for each individual halo, to prevent updating the same counter
//int rowStartPosEast = rowStartPos;
int rowStartPosWest = rowStartPos;
int rowStartPosNorth = rowStartPos;
int rowStartPosSouth = rowStartPos;
int rowEndPosEast = rowEndPos;
//int rowEndPosWest = rowEndPos;
//int rowEndPosNorth = rowEndPos;
//int rowEndPosSouth = rowEndPos;
//Checks provided for Boundary devices in GPU topology
if ((device.devicePosition_X - 1) >= 0) {
//cout << "West Halo needed ";
device.wHalo_flag = 1;
device.wHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.wHalo[rowNum] = vec_in[rowStartPosWest];
//cout << rowStartPosWest << " ";
rowStartPosWest += dim;
}
}
if ((device.devicePosition_X + 1) < maxdevicesAlong_X) {
//cout << "East Halo needed ";
device.eHalo_flag = 1;
device.eHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.eHalo[rowNum] = vec_in[rowEndPosEast];
//cout << rowEndPosEast << " ";
rowEndPosEast += dim;
}
}
if ((device.devicePosition_Y - 1) >= 0) {
//cout << "South Halo needed ";
device.sHalo_flag = 1;
device.sHalo.resize(chunk_X);
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.sHalo[rowNum] = vec_in[rowStartPosSouth];
//cout << rowStartPosSouth << " ";
rowStartPosSouth++;
}
}
if ((device.devicePosition_Y + 1) < maxDevicesAlong_Y) {
//cout << "North Halo needed ";
device.nHalo_flag = 1;
device.nHalo.resize(chunk_X);
rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1));
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.nHalo[rowNum] = vec_in[rowStartPosNorth];
//cout << rowStartPosNorth << " ";
rowStartPosNorth++;
}
}
}
//======================================Exchange Halos: on Host==============================================
int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) {
int devID = (devCoord_y * numberofDevicesAlong_X) + devCoord_x;
return devID;
}
void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X , getDevCoord_Y+1, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X+1, getDevCoord_Y , numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
}
}
}
bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X , getDevCoord_Y+1, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X+1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]);
}
}
return true;
}
//===========================Exchange Halos: on Host Ends=====================================================
//Init matrix Diagonals A0, A1, A2, A3, A4
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
unsigned int size = dim * dim;
for (unsigned int i = 0; i < size; i++)
{
A0[i] = val_A0[i];
A1[i] = val_A1[i];
A2[i] = val_A2[i];
A3[i] = val_A3[i];
A4[i] = val_A4[i];
rhs[i] = val_rhs[i];
vec_in[i] = val_x_in[i];
vec_out[i] = 0.0f;
}
}
void getAllDeviceProperties() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cout << " Device Number: " << i << endl;
cout << " Device name: " << prop.name << endl;
cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl;
cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;;
cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl;
}
}
/* Prints an output file for checking results */
void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) {
int devicePosX = devicePosition_X;
int devicePosY = devicePosition_Y;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
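//Worked example (illustrative values only): with dim = 8 and chunk_X = chunk_Y = 4, device (1,1) gets
//dataStartPos_X = (1 * 8 * 4) + (1 * 4) = 36, so its first row covers global indices 36..39.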
//One complete row across all GPUs is dim; to get the element directly above an element we add (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//cout << endl;
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
result[pos] = partial_result[indexCounter];
indexCounter++;
}
//cout << endl;
rowStartPos += dim;
rowEndPos = rowStartPos + chunk_X;
}
//Printing when the last device computation is done: remove the check to inspect the computation for each device
int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X);
if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1)))
{
ofstream myfile;
myfile.open("data2.txt");
//Printing the values here
for (int i = totalSize; i > 0; i--) {
if (i%dim == 0) {
myfile << endl;
}
myfile << result[i - 1] << " ";
}
myfile.close();
}
}
hipError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in)
{
//Default value; overridden below when val_dim is non-zero
int dim = 8;
if (val_dim != 0) {
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Get the total number of devices
int numDevices = -1;
hipGetDeviceCount(&numDevices);
//numDevices = 2;
//Set Decomposition dimension 1D or 2D: when decomposition is 0, computation happens on a single GPU
int decom_Dim = 2;
//Set Values for Domain Decomposition type 1D or 2D
int domainDecom_Dim = decom_Dim;
//Read the custom config defined in file "multiGPUConfig.xml"
getConfiguration("multiGPUConfig.xml", numDevices, domainDecom_Dim);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development
bool p2penabled = false;
p2penabled = enableP2P(numDevices);
//Configuring the number of GPUs manually
//numDevices=2;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_Device> deviceArray;
/* Distributed Computation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition nhalo and shalo are initialized from vector x_in
1.b) In 2D decomposition nhalo, shalo, ehalo and whalo are initialized from vector x_in
2. Pass the halos to the Jacobi kernel.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decomposition. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
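/* A minimal per-iteration sketch of the flow described above, assuming the pinned-host exchange path
(names abbreviated and illustrative; the p2p path replaces the host exchange with hipMemcpyPeerAsync):
for (int it = 0; it < iterations; it++) {
jacobi_Simple<<<grid, block, 0, streams[dev]>>>(...); //steps 2-3: compute, write boundary results into halos
hipMemcpyAsync(halo_pinned[dev], d_halo[dev], ..., hipMemcpyDeviceToHost, haloStream[dev]); //device -> pinned host
exchangehalos_onHostPinned(...); //step 4: swap (north,south) and (east,west) pinned pairs
hipMemcpyAsync(d_halo[dev], halo_pinned[dev], ..., hipMemcpyHostToDevice, haloStream[dev]); //pinned host -> device
swap(d_Vec_In[dev], d_Vec_Out[dev]);
}
*/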
//=================================Domain Decomposition Logic Starts=================================================================
/*Generating a GPU Grid with multiple GPUs and creating a Topology*/
int numberOfDevicesAlong_X = 1;
int numberOfDevicesAlong_Y = 1;
generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl;
//Total elements along each dim in 2D
int chunk_X = dim / numberOfDevicesAlong_X;
int chunk_Y = dim / numberOfDevicesAlong_Y;
/* Creating a GPU topology with multiple devices*/
createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
//Let the total number of GPUs be 2: has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
std::vector<int> domainDivision(numDevices);
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//Chunk per GPU will be the same irrespective of 1D or 2D decomposition
domainDivision[i] = size / numDevices;
}
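//Worked example (illustrative values only): for dim = 8 on 4 devices in a 2 x 2 grid,
//size = 64, chunk_X = chunk_Y = 4 and domainDivision[i] = 64 / 4 = 16 elements per device.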
//For use on Device
std::vector<float*>d_A0(numDevices);
std::vector<float*>d_A1(numDevices);
std::vector<float*>d_A2(numDevices);
std::vector<float*>d_A3(numDevices);
std::vector<float*>d_A4(numDevices);
std::vector<float*>d_Vec_In(numDevices);
std::vector<float*>d_Vec_Out(numDevices);
std::vector<float*>d_nhalos(numDevices);
std::vector<float*>d_shalos(numDevices);
std::vector<float*>d_ehalos(numDevices);
std::vector<float*>d_whalos(numDevices);
std::vector<float*>d_Rhs(numDevices);
//Device Buffers for parallel communication using streams: Concept of Front and Back Buffer Oct 30, 2017
std::vector<float*>x_buffer_north(numDevices);
std::vector<float*>x_buffer_south(numDevices);
std::vector<float*>y_buffer_west(numDevices);
std::vector<float*>y_buffer_east(numDevices);
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
vector<float*>nHalo_pinned(numDevices);
vector<float*>sHalo_pinned(numDevices);
vector<float*>wHalo_pinned(numDevices);
vector<float*>eHalo_pinned(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipHostMalloc((void**)&nHalo_pinned[dev], (chunk_X) * sizeof(float));
hipHostMalloc((void**)&sHalo_pinned[dev], (chunk_X) * sizeof(float));
hipHostMalloc((void**)&wHalo_pinned[dev], (chunk_Y) * sizeof(float));
hipHostMalloc((void**)&eHalo_pinned[dev], (chunk_Y) * sizeof(float));
}
for (int dev = 0; dev < numDevices; dev++)
{
//Setting the device before allocation
hipSetDevice(dev);
//cudamalloc the Diagonals
hipMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float));
//Using pinned memory as part of performance upgrade- Phase 2 of development
//cudamalloc the Input Vector and Result vector
hipMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float));
//hipMalloc Halos: North and South--1D. TODO: East and West for 2D
hipMalloc((void**)&d_nhalos[dev], chunk_X * sizeof(float));
hipMalloc((void**)&d_shalos[dev], chunk_X * sizeof(float));
hipMalloc((void**)&d_ehalos[dev], chunk_Y * sizeof(float));
hipMalloc((void**)&d_whalos[dev], chunk_Y * sizeof(float));
//Buffer memory used for p2p exchange
hipMalloc((void**)&x_buffer_north[dev], chunk_X * sizeof(float));
hipMalloc((void**)&x_buffer_south[dev], chunk_X * sizeof(float));
hipMalloc((void**)&y_buffer_west[dev], chunk_Y * sizeof(float));
hipMalloc((void**)&y_buffer_east[dev], chunk_Y * sizeof(float));
}
/* The transfer of Data from Host to Device : Domain Decomposition in 2D*/
if (decom_Dim == 2) {
//Create Partial Diagonal Vectors
//Size per GPU will be chunk_X * chunk_Y
int chunkSize = chunk_X * chunk_Y;
std::vector<float> partial_a0(chunkSize);
std::vector<float> partial_a1(chunkSize);
std::vector<float> partial_a2(chunkSize);
std::vector<float> partial_a3(chunkSize);
std::vector<float> partial_a4(chunkSize);
std::vector<float> partial_vec_in(chunkSize);
std::vector<float> partial_vec_out(chunkSize);
std::vector<float> partial_rhs(chunkSize);
std::vector<float> partial_result(chunkSize);
for (int dev = 0; dev < numDevices; dev++)
{
//Test the properties of the device assigned
//cout << endl << "New Logical Device created " << deviceArray[dev].deviceID;
//cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")";
//==========Important: Logic for creation of Chunks to be allocated to GPUs==========================================
//Important : Mention about the correlation between the topology and data position in the thesis
int devicePosX = deviceArray[dev].devicePosition_X;
int devicePosY = deviceArray[dev].devicePosition_Y;
//cout << endl << "For Device ID " << deviceArray[dev].deviceID << endl;
//cout << endl << "Device pos X " << devicePosX << endl;
//cout << endl << "Device pos Y " << devicePosY << endl;
//cout << endl << "Chunk X " << chunk_X << endl;
//cout << endl << "Chunk Y " << chunk_Y << endl;
//cout << endl << "Number of device along X " << numberOfDevicesAlong_X << endl;
//cout << endl << "Number of device along Y " << numberOfDevicesAlong_Y << endl;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//cout << endl << "Data Start Pos is " << dataStartPos_X << endl;
//cout << endl << "Data End Pos is " << dataEndPos_X << endl;
//One complete row across all GPUs is dim; to get the element directly above an element we add (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//Initialize Halos
initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//cout << endl << "Data Start Pos is " << rowStartPos << endl;
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
partial_a0[indexCounter] = a0[pos];
partial_a1[indexCounter] = a1[pos];
partial_a2[indexCounter] = a2[pos];
partial_a3[indexCounter] = a3[pos];
partial_a4[indexCounter] = a4[pos];
partial_vec_in[indexCounter] = vec_in[pos];
partial_vec_out[indexCounter] = vec_out[pos];
partial_rhs[indexCounter] = rhs[pos];
partial_result[indexCounter] = result[pos];
indexCounter++;
}
//cout << endl << "Data End Pos is " << rowEndPos << endl;
rowStartPos += dim;
rowEndPos = rowStartPos+chunk_X;
}
//==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ==========================================
//Setting Cuda device
hipSetDevice(dev);
//Copy the diagonals from host to device : calling all at once instead of putting inside the for loop
hipMemcpy(d_A0[dev], &partial_a0[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A1[dev], &partial_a1[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A2[dev], &partial_a2[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A3[dev], &partial_a3[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A4[dev], &partial_a4[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
//Copy in and out vectors and RHS
hipMemcpy(d_Vec_In[dev], &partial_vec_in[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Vec_Out[dev], &partial_vec_out[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Rhs[dev], &partial_rhs[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
}
if (auto err = hipGetLastError())
{
cout << "Data copy failed 1: " << hipGetErrorString(err) << endl;
return err;
}
//Copy initial Halos in 2D
//Initial Halo exchange: then do the initial memcopies
exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipMemcpy(d_ehalos[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpy(d_whalos[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
}
if (auto err = hipGetLastError())
{
cout << "Halo Copy Failed " << hipGetErrorString(err) << endl;
return err;
}
//Development phase 2 changes: initialize buffers for p2p communication
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
//cout << "Device ID for nHaloFlag is : " << deviceArray[dev].deviceID<<endl;
hipMemcpy(x_buffer_north[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
//cout << "Device ID for sHaloFlag is : " << deviceArray[dev].deviceID << endl;
hipMemcpy(x_buffer_south[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
//cout << "Device ID for eHaloFlag is : " << deviceArray[dev].deviceID << endl;
hipMemcpy(y_buffer_east[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
//cout << "Device ID for wHaloFlag is : " << deviceArray[dev].deviceID << endl;
hipMemcpy(y_buffer_west[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
}
}
//=================================Domain Decomposition Logic Ends =================================================================
//=================================Setting up the grids and blocks for kernel launch================================================
//int blocksize = -1;
//int threads = -1;
int2 myDim;
myDim.x = chunk_X;
myDim.y = chunk_Y;
dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 grid(DIVRND(myDim.x, BLOCKSIZE_X), DIVRND(myDim.y, BLOCKSIZE_Y));
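//Launch-geometry sketch (illustrative values only): with BLOCKSIZE_X = 32, BLOCKSIZE_Y = 16 and a 512 x 512 chunk,
//DIVRND yields grid = (16, 32), i.e. 512 blocks of 32 x 16 threads covering the chunk exactly.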
//==================================================================================================================================
//Call to kernel
int iterations = 0;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
else
{
cout << endl << " No. of iterations is zero exiting... ";
//return;
}
//===========================================CUDA Stream implementation for performance. Phase 2 of Development ====================================================
//===========Algorithm Improvement: Identify the neighbours so that they can be launched together and the exchange can take place without having to wait for computation across all devices============================
//hipStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors.
vector<hipStream_t> streams(numDevices);
//Create separate streams for each Halo Exchange
vector<hipStream_t> nHaloExchange(numDevices);
vector<hipStream_t> sHaloExchange(numDevices);
vector<hipStream_t> eHaloExchange(numDevices);
vector<hipStream_t> wHaloExchange(numDevices);
//hipStream_t nHaloExchange[4];
//hipStream_t sHaloExchange[4];
//hipStream_t eHaloExchange[4];
//hipStream_t wHaloExchange[4];
//Note: The default stream for a device is always synchronizing, so creating separate streams for each device
for (int i = 0; i < numDevices; i++)
{
hipSetDevice(i);
hipStreamCreate(&streams[i]);
if (p2penabled) {
hipStreamCreate(&nHaloExchange[i]);
hipStreamCreate(&sHaloExchange[i]);
hipStreamCreate(&eHaloExchange[i]);
hipStreamCreate(&wHaloExchange[i]);
}
}
//For explicitly synchronizing p2p transfers and async memcopies
//hipEvent_t events[4];
vector<hipEvent_t> events(numDevices);
vector<hipEvent_t> nHaloEvent(numDevices);
vector<hipEvent_t> sHaloEvent(numDevices);
vector<hipEvent_t> eHaloEvent(numDevices);
vector<hipEvent_t> wHaloEvent(numDevices);
//hipEvent_t nHaloEvent[4];
//hipEvent_t sHaloEvent[4];
//hipEvent_t eHaloEvent[4];
//hipEvent_t wHaloEvent[4];
for (int i = 0; i < numDevices; i++)
{
hipSetDevice(i);
hipEventCreate(&events[i]);
if (p2penabled) {
hipEventCreate(&nHaloEvent[i]);
hipEventCreate(&sHaloEvent[i]);
hipEventCreate(&eHaloEvent[i]);
hipEventCreate(&wHaloEvent[i]);
}
}
/*Using pageable memory first*/
//std::vector<float> partial_resultOnHost(chunk_X * chunk_Y);
/*Using pinned (page-locked) memory for performance*/
vector<float*>partial_resultOnHost(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipHostMalloc((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float));
}
//For OMP data race prevention, pointer copies for Halos (stored as vectors of pointers)
vector<float*> nhalo_ptr(numDevices);
vector<float*> shalo_ptr(numDevices);
vector<float*> ehalo_ptr(numDevices);
vector<float*> whalo_ptr(numDevices);
vector<float*> x_buffer_north_ptr_write(numDevices);
vector<float*> x_buffer_south_ptr_write(numDevices);
vector<float*> y_buffer_east_ptr_write(numDevices);
vector<float*> y_buffer_west_ptr_write(numDevices);
//Initialize the pointer copies to the current device buffers
for (int dev = 0; dev < numDevices; dev++)
{
//For OMP thread safety
nhalo_ptr[dev] = d_nhalos[dev];
shalo_ptr[dev] = d_shalos[dev];
ehalo_ptr[dev] = d_ehalos[dev];
whalo_ptr[dev] = d_whalos[dev];
x_buffer_north_ptr_write[dev]=x_buffer_north[dev];
x_buffer_south_ptr_write[dev]=x_buffer_south[dev];
y_buffer_east_ptr_write[dev]= y_buffer_east[dev];
y_buffer_west_ptr_write[dev]= y_buffer_west[dev];
}
//==============================================================
//Check performance
hipError_t status = hipGetLastError();
high_resolution_clock::time_point t1 = high_resolution_clock::now();
#pragma omp parallel num_threads(numDevices)
{
int dev = omp_get_thread_num();
//hipSetDevice(omp_get_thread_num());
for (int i = 0; i < iterations; i++)
{
hipSetDevice(dev);
#pragma omp barrier
if ((i>0))
{
//As this does not run on a single host thread, race conditions occur, so the pointer swapping has to be managed by creating a copy
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
swap(x_buffer_north[dev], d_nhalos[dev]);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
swap(x_buffer_south[dev], d_shalos[dev]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
swap(y_buffer_east[dev], d_ehalos[dev]);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
swap(y_buffer_west[dev], d_whalos[dev]);
}
}
jacobi_Simple << <grid, block, 0, streams[dev] >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim, myDim);
//For synchronizing when the Halo Exchange starts
hipEventRecord(events[dev], streams[dev]);
if (i == (iterations - 1))//Copy the results just for the final iteration
{
hipMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost, streams[dev]);
continue;
}
swap(d_Vec_In[dev], d_Vec_Out[dev]);
//Store Halo positions after iteration for exchanging
if (!p2penabled) {
if (numDevices > 1)
{
if (deviceArray[dev].nHalo_flag == 1)
{
hipStreamWaitEvent(nHaloExchange[dev], events[dev], 0);
hipMemcpyAsync(nHalo_pinned[dev], d_nhalos[dev], chunk_X * sizeof(float), hipMemcpyDeviceToHost, nHaloExchange[dev]);
if (auto err = hipGetLastError())
{
cout << "d_nhalos copy failed D2H: " << hipGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipStreamWaitEvent(sHaloExchange[dev], events[dev], 0);
hipMemcpyAsync(sHalo_pinned[dev], d_shalos[dev], chunk_X * sizeof(float), hipMemcpyDeviceToHost, sHaloExchange[dev]);
if (auto err = hipGetLastError())
{
cout << "d_shalos copy failed D2H: " << hipGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipStreamWaitEvent(eHaloExchange[dev], events[dev], 0);
hipMemcpyAsync(eHalo_pinned[dev], d_ehalos[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToHost, eHaloExchange[dev]);
if (auto err = hipGetLastError())
{
cout << "d_ehalos copy failed D2H: " << hipGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipStreamWaitEvent(wHaloExchange[dev], events[dev], 0);
hipMemcpyAsync(wHalo_pinned[dev], d_whalos[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToHost, wHaloExchange[dev]);
if (auto err = hipGetLastError())
{
cout << "d_whalos copy failed D2H " << hipGetErrorString(err) << endl;
//return err;
}
}
}
}
if (auto err = hipGetLastError())
{
cout << "Data copy failed 2: " << hipGetErrorString(err) << endl;
//return err;
}
//Exchange Halos after each iteration except the last iteration
if ((i < (iterations - 1)))
{
//hipStreamSynchronize(streams[dev]);
//hipDeviceSynchronize();
if ((!p2penabled)) {
if (auto err = hipGetLastError())
{
cout << "Stream " << dev << " synchronize error for iteration : " << i << ". ERROR IS: " << hipGetErrorString(err) << endl;
//return err;
}
bool exchangeComplete = false;
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
//exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
exchangeComplete = exchangehalos_onHostPinned(numDevices, deviceArray, numberOfDevicesAlong_X, nHalo_pinned, sHalo_pinned, eHalo_pinned, wHalo_pinned);
if (exchangeComplete) {
for (int dev = 0; dev < numDevices; dev++)
{
//Swap input output vectors for all devices
swap(d_Vec_In[dev], d_Vec_Out[dev]);
hipSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpyAsync(d_nhalos[dev], nHalo_pinned[dev], chunk_X * sizeof(float), hipMemcpyHostToDevice, nHaloExchange[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_nhalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipMemcpyAsync(d_shalos[dev], sHalo_pinned[dev], chunk_X * sizeof(float), hipMemcpyHostToDevice, sHaloExchange[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_shalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipMemcpyAsync(d_ehalos[dev], eHalo_pinned[dev], chunk_Y * sizeof(float), hipMemcpyHostToDevice, eHaloExchange[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_ehalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpyAsync(d_whalos[dev], wHalo_pinned[dev], chunk_Y * sizeof(float), hipMemcpyHostToDevice, wHaloExchange[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_whalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
}
}
}
else {
//============Important: Before copying to buffers make sure the kernel on the respective GPU(s) finished execution using hipStreamWaitEvent=======================
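//Sketch of the event chain used below (per exchanged halo):
// hipEventRecord(events[dev], streams[dev]) -> marks that the kernel on dev has finished
// hipStreamWaitEvent(nHaloExchange[dev], events[dev]) -> the peer copy may start only after that kernel
// hipMemcpyPeerAsync(...) + hipEventRecord(nHaloEvent[dev], nHaloExchange[dev]) -> marks the transfer
// hipStreamWaitEvent(streams[devIDtoNorth], nHaloEvent[dev]) -> the neighbour's next kernel waits for the transfer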
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
nhalo_ptr[dev] = d_nhalos[dev];
shalo_ptr[dev] = d_shalos[dev];
ehalo_ptr[dev] = d_ehalos[dev];
whalo_ptr[dev] = d_whalos[dev];
x_buffer_north_ptr_write[dev] = x_buffer_north[dev];
x_buffer_south_ptr_write[dev] = x_buffer_south[dev];
y_buffer_east_ptr_write[dev] = y_buffer_east[dev];
y_buffer_west_ptr_write[dev] = y_buffer_west[dev];
#pragma omp barrier // Important: To make sure all threads assign proper values to duplicate pointers before Halo Exchange Begins
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
hipSetDevice(dev);
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y+1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(nHaloExchange[dev], events[dev], 0);
hipMemcpyPeerAsync(x_buffer_south[devIDtoNorth], devIDtoNorth, d_nhalos[dev], dev, chunk_X * sizeof(float), nHaloExchange[dev]);
hipEventRecord(nHaloEvent[dev], nHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
hipSetDevice(devIDtoNorth);
hipStreamWaitEvent(streams[devIDtoNorth], nHaloEvent[dev], 0);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
hipSetDevice(dev);
int devIDtoSouth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y-1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(sHaloExchange[dev], events[dev], 0);
hipMemcpyPeerAsync(x_buffer_north[devIDtoSouth], devIDtoSouth, d_shalos[dev], dev, chunk_X * sizeof(float), sHaloExchange[dev]);
hipEventRecord(sHaloEvent[dev], sHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
hipSetDevice(devIDtoSouth);
hipStreamWaitEvent(streams[devIDtoSouth], sHaloEvent[dev], 0);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
hipSetDevice(dev);
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X+1, getDevCoord_Y, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(eHaloExchange[dev], events[dev], 0);
hipMemcpyPeerAsync(y_buffer_west[devIDtoEast], devIDtoEast, d_ehalos[dev], dev, chunk_Y * sizeof(float), eHaloExchange[dev]);
hipEventRecord(eHaloEvent[dev], eHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
hipSetDevice(devIDtoEast);
hipStreamWaitEvent(streams[devIDtoEast], eHaloEvent[dev], 0);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
hipSetDevice(dev);
int devIDtoWest = getDeviceIDfromCoord(getDevCoord_X-1, getDevCoord_Y, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(wHaloExchange[dev], events[dev], 0);
hipMemcpyPeerAsync(y_buffer_east[devIDtoWest], devIDtoWest, d_whalos[dev], dev, chunk_Y * sizeof(float), wHaloExchange[dev]);
hipEventRecord(wHaloEvent[dev], wHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
hipSetDevice(devIDtoWest);
hipStreamWaitEvent(streams[devIDtoWest], wHaloEvent[dev], 0);
}
/*if (auto err = hipGetLastError())
{
cout << "Halo Exchange Error: " << hipGetErrorString(err) << endl;
}*/
}
}
}
}
if (auto err = hipGetLastError())
{
cout << "Data copy failed 3: " << hipGetErrorString(err) << endl;
return err;
}
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl;
//Sync and Destroy streams and events
for (int i = 0; i < numDevices; ++i)
{
hipSetDevice(i);
//Destroy Events
hipEventDestroy(events[i]);
hipEventDestroy(nHaloEvent[i]);
hipEventDestroy(sHaloEvent[i]);
hipEventDestroy(eHaloEvent[i]);
hipEventDestroy(wHaloEvent[i]);
//Synchronize the streams
hipStreamSynchronize(streams[i]);
hipStreamDestroy(streams[i]);
hipStreamSynchronize(nHaloExchange[i]);
hipStreamDestroy(nHaloExchange[i]);
hipStreamSynchronize(sHaloExchange[i]);
hipStreamDestroy(sHaloExchange[i]);
hipStreamSynchronize(eHaloExchange[i]);
hipStreamDestroy(eHaloExchange[i]);
hipStreamSynchronize(wHaloExchange[i]);
hipStreamDestroy(wHaloExchange[i]);
}
//Results copied to disk
for (int dev = 0; dev < numDevices; dev++)
{
sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations);
}
//==========================================Performance using CUDA stream ends===========================================================================
//Done in phase 2 of development: Disable P2P across devices
if (p2penabled) {
disableP2P(numDevices);
}
//Free memory on device
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipFree(d_A0[dev]);
hipFree(d_A1[dev]);
hipFree(d_A2[dev]);
hipFree(d_A3[dev]);
hipFree(d_A4[dev]);
hipFree(d_Vec_In[dev]);
hipFree(d_Vec_Out[dev]);
hipFree(d_nhalos[dev]);
hipFree(d_shalos[dev]);
hipFree(d_ehalos[dev]);
hipFree(d_whalos[dev]);
hipFree(d_Rhs[dev]);
hipFree(x_buffer_south[dev]);
hipFree(x_buffer_north[dev]);
hipFree(y_buffer_west[dev]);
hipFree(y_buffer_east[dev]);
hipHostFree(partial_resultOnHost[dev]);
hipHostFree(nHalo_pinned[dev]);
hipHostFree(sHalo_pinned[dev]);
hipHostFree(wHalo_pinned[dev]);
hipHostFree(eHalo_pinned[dev]);
hipDeviceReset();
}
cout << endl << "Device Memory free successful." << endl;
//Take care of dynamic memory allocation
//delete[] domainDivision;
return hipSuccess;
}
int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in)
{
hipError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
if (cudaStatus != hipSuccess) {
cout << "Computation failed: " << endl;
return 1;
}
if (cudaStatus != hipSuccess) {
cout << "Cuda Device Reset failed: " << endl;
return 1;
}
return 0;
}
|
dc04316d88ad8f4e1e4c4ff475f89558d98196bf.cu
|
//P2P Synchronization using events and Separate streams. Coupling-Overlapping several Exchanges together.
#include <omp.h>
#include "tinyxml.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi2D_Decom.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
#include <fstream>
#include <vector_types.h>
#define IMUL(a,b) __mul24(a,b)
#define DIVRND(a,b) ((a+b-1)/b)
#define BLOCKSIZE_X 32
#define BLOCKSIZE_Y 16
using namespace std;
using namespace std::chrono;
//cudaError_t performMultiGPUJacobi();
//Support for compilers below C++14 on *nix
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
struct create_Device
{
int deviceID;
//In a GPU topology set the GPU position
int devicePosition_X;
int devicePosition_Y;
int devicePosition_Z;
vector<float> eHalo;
vector<float> wHalo;
vector<float> nHalo;
vector<float> sHalo;
//Flags indicating which halos the device needs
int eHalo_flag = 0;
int wHalo_flag = 0;
int nHalo_flag = 0;
int sHalo_flag = 0;
};
//Simple Jacobi iteration
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom, int2 dim)
{
int2 pos = make_int2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y
);
int index = (pos.y * dim.x) + pos.x;
//int index = threadIdx.x + blockDim.x * blockIdx.x;
float result = rhs[index];
int dim_x = dim.x;
int dim_y = dim.y;
//x_pos and y_pos are just to understand the thread layout; they can be renamed to any suitable variable names
int x_pos = pos.y;
int y_pos = pos.x;
//result = nhalo[y_pos];
//x_out[index] = result;
//Get the boundaries
int leftBoundaryElem = x_pos * (dim_x);
int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
int bottomBoundaryElem = y_pos;
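//Worked example (illustrative values only): for a 4 x 4 chunk (dim_x = dim_y = 4), the thread with
//pos.x = 1, pos.y = 2 has index = 9, x_pos = 2, y_pos = 1, leftBoundaryElem = 8, rightBoundaryElem = 11,
//bottomBoundaryElem = 1 and topBoundaryElem = 13.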
/*if((deviceID==2)&&(index==leftBoundaryElem))
{
printf("For Device %d index is : %d\n", deviceID, index);
printf("For Device %d leftBoundaryElem is : %d\n", deviceID, leftBoundaryElem);
printf("rightBoundaryElem is : %d\n", rightBoundaryElem);
printf("topBoundaryElem is : %d\n", topBoundaryElem);
printf("bottomBoundaryElem is : %d\n", bottomBoundaryElem);
}*/
//Halo computation for 1D Decomposition: for the First and Last GPU, Halo computation on both sides (nhalo and shalo) won't be needed
if (domain_Decom == 1)
{
if (numDevices > 1)
{
//First GPU
if (deviceID == 0) {
//We need to use nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
//Last GPU
else if (deviceID == (numDevices - 1)) {
//We need to use shalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
return;
}
//For all the middle GPUs
else
{
//We need to use both shalos and nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
}
}
else if (domain_Decom == 2) {
//======Left Boundary Elem
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
//Computation using the Halos
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
result -= A1[index] * whalo[x_pos];
}
}
//======Right Boundary Elem
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
result -= A3[index] * ehalo[x_pos];
}
}
//======Bottom Boundary Elem
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
result -= A0[index] * shalo[y_pos];
}
}
//======Top Boundary Elem
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
result -= A4[index] * nhalo[y_pos];
}
}
result /= A2[index];
x_out[index] = result;
//Updating Halos at the End of the computation
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
nhalo[y_pos] = result;
}
}
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
shalo[y_pos] = result;
}
}
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
whalo[x_pos] = result;
}
}
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
ehalo[x_pos] = result;
}
}
return;
}
//For computations on a Machine with a single GPU
else
{
{//For some reason order of computation (left,right,top and bottom) gives a different result
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
return;
}
}
}
//========================MultiGPU utility functions============================================================================
// Load the named config file and read the numDevices and decomposition settings
void getConfiguration(const char* pFilename, int &numDevices, int &domain_decom)
{
TiXmlDocument doc(pFilename);
bool loadOkay = doc.LoadFile();
if (loadOkay)
{
cout <<"\nFile Loaded successfully\n" ;
TiXmlElement *pRoot = doc.RootElement();
TiXmlElement *element = pRoot->FirstChildElement();
while (element)
{
string elementName = element->Value();
string attribute = element->Attribute("name"); //Gets the parameter name from the "name" attribute
string value = element->GetText();
cout << "\n The attribute is "<<attribute;
cout << "\n The elementName is " << elementName;
cout << "\n The element Value is " << value;
if (attribute=="numDevices") {
numDevices = stoi(value);
}
if (attribute == "decomposition") {
domain_decom = stoi(value);
}
element = element->NextSiblingElement();
}
}
else
{
cout << "\nCould not load config file\n";
}
}
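//A config file this loop would accept might look like the sketch below (element names are illustrative;
//only the "name" attribute and the element text are read):
// <config>
// <param name="numDevices">4</param>
// <param name="decomposition">2</param>
// </config>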
void checkP2Paccess(int numGPUs)
{
for (int i = 0; i<numGPUs; i++)
{
cudaSetDevice(i);
for (int j = 0; j<numGPUs; j++)
{
int access;
if (i != j)
{
cudaDeviceCanAccessPeer(&access, i, j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed : " << cudaGetErrorString(err) << endl;
return;
}
}
}
}
cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n";
}
bool enableP2P(int numGPUs)
{
for (int i = 0; i<numGPUs; i++)
{
cudaSetDevice(i);
for (int j = 0; j<numGPUs; j++)
{
int access;
cudaDeviceCanAccessPeer(&access, i, j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl;
return false;
}
if (access)
{
cudaDeviceEnablePeerAccess(j, 0);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl;
return false;
}
}
}
}
return true;
}
void disableP2P(int numGPUs)
{
for (int i = 0; i<numGPUs; i++)
{
cudaSetDevice(i);
for (int j = 0; j<numGPUs; j++)
{
int access;
cudaDeviceCanAccessPeer(&access, i, j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while disabling : " << cudaGetErrorString(err) << endl;
return;
}
if (access)
{
cudaDeviceDisablePeerAccess(j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while disabling: " << cudaGetErrorString(err) << endl;
return;
}
}
}
}
}
void performFactorPairing(int numDevices, int &fact_x, int &fact_y)
{
int i;
//Check if numDevices is Prime
bool isPrime = true;
for (i = 2; i < numDevices / 2; ++i)
{
if (numDevices % i == 0)
{
isPrime = false;
break;
}
}
if (isPrime)
{
fact_x = numDevices;
fact_y = 1;
}
else
{
//Finding the appropriate factor pairs to divide the grid
for (i = 2; i < numDevices / 2; ++i)
{
if (numDevices % i == 0) {
fact_x = i;
fact_y = numDevices / i;
}
}
}
}
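//Usage sketch (illustrative values): performFactorPairing(6, fx, fy) yields fx = 2, fy = 3;
//performFactorPairing(8, fx, fy) yields fx = 2, fy = 4; a prime count such as 5 yields fx = 5, fy = 1.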
//===============================================================================================================================
//====================================Creating Topology with the number of Devices available====================================
void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y, int domainDecomType)
{
//Finding GPU topology along x and y
//Assuming the total number of devices is a perfect square (To be changed later)
if(domainDecomType==1)
{
numberOfDevicesAlong_X = numDevices;
numberOfDevicesAlong_Y = 1;
}
else
{
int val = -1;
val = (int)sqrt(numDevices);
if ((val*val) == numDevices)
{
numberOfDevicesAlong_X = val;
numberOfDevicesAlong_Y = val;
}
else
{
int fact_x = 1;
int fact_y = 1;
performFactorPairing(numDevices, fact_x, fact_y);
numberOfDevicesAlong_X = fact_x;
numberOfDevicesAlong_Y = fact_y;
}
}
}
/* Creates a topology for a number of devices in a system
for ex. the devices are aware of their left, right, top and bottom neighbours in 2D
1. It also decides the chunk per device by determining the x- and y-dimensions of the chunk of data per device.
2. It also initializes halos for each device which can be exchanged with the neighbours
*/
void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y)
{
deviceArray.resize(numDevices);
unsigned int deviceCount = 0;
for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) {
for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) {
deviceArray[deviceCount].deviceID = deviceCount;
deviceArray[deviceCount].devicePosition_X = gridCount_X;
deviceArray[deviceCount].devicePosition_Y = gridCount_Y;
//devicePosition_Z to be changed later
deviceArray[deviceCount].devicePosition_Z = 1;
deviceCount++;
}
}
}
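//Resulting layout sketch (illustrative, 2 x 2 grid): IDs are assigned row by row,
// Y=1: ID 2 (X=0) ID 3 (X=1)
// Y=0: ID 0 (X=0) ID 1 (X=1)
//which matches the row-major mapping in getDeviceIDfromCoord().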
//==============================================================================================================================
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halos need to be initialized and computed
//TODO:Create a Halo Exchange Mechanism for 2D Multi GPU topology
void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim)
{
/*cout << endl << "Inside Halo Computation 2D. printing Details";
cout << endl << "Device ID " << device.deviceID;
cout << endl << "Device position X " << device.devicePosition_X;
cout << endl << "Device position Y " << device.devicePosition_Y;
cout << endl << "Row Start " << rowStartPos;
cout << endl << "Row End " << rowEndPos;*/
//Assigning a counter for each individual Halo to prevent updating the same counter
//int rowStartPosEast = rowStartPos;
int rowStartPosWest = rowStartPos;
int rowStartPosNorth = rowStartPos;
int rowStartPosSouth = rowStartPos;
int rowEndPosEast = rowEndPos;
//int rowEndPosWest = rowEndPos;
//int rowEndPosNorth = rowEndPos;
//int rowEndPosSouth = rowEndPos;
//Checks provided for Boundary devices in GPU topology
if ((device.devicePosition_X - 1) >= 0) {
//cout << "West Halo needed ";
device.wHalo_flag = 1;
device.wHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.wHalo[rowNum] = vec_in[rowStartPosWest];
//cout << rowStartPosWest << " ";
rowStartPosWest += dim;
}
}
if ((device.devicePosition_X + 1) < maxdevicesAlong_X) {
//cout << "East Halo needed ";
device.eHalo_flag = 1;
device.eHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.eHalo[rowNum] = vec_in[rowEndPosEast];
//cout << rowEndPosEast << " ";
rowEndPosEast += dim;
}
}
if ((device.devicePosition_Y - 1) >= 0) {
//cout << "South Halo needed ";
device.sHalo_flag = 1;
device.sHalo.resize(chunk_X);
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.sHalo[rowNum] = vec_in[rowStartPosSouth];
//cout << rowStartPosSouth << " ";
rowStartPosSouth++;
}
}
if ((device.devicePosition_Y + 1) < maxDevicesAlong_Y) {
//cout << "North Halo needed ";
device.nHalo_flag = 1;
device.nHalo.resize(chunk_X);
rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1));
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.nHalo[rowNum] = vec_in[rowStartPosNorth];
//cout << rowStartPosNorth << " ";
rowStartPosNorth++;
}
}
}
//======================================Exchange Halos: on Host==============================================
int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) {
int devID = (devCoord_y * numberofDevicesAlong_X) + devCoord_x;
return devID;
}
void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X , getDevCoord_Y+1, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X+1, getDevCoord_Y , numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
}
}
}
bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X , getDevCoord_Y+1, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X+1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]);
}
}
return true;
}
//===========================Exchange Halos: on Host Ends=====================================================
//Init matrix Diagonals A0, A1, A2, A3, A4
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
unsigned int size = dim * dim;
for (unsigned int i = 0; i < size; i++)
{
A0[i] = val_A0[i];
A1[i] = val_A1[i];
A2[i] = val_A2[i];
A3[i] = val_A3[i];
A4[i] = val_A4[i];
rhs[i] = val_rhs[i];
vec_in[i] = val_x_in[i];
vec_out[i] = 0.0f;
}
}
void getAllDeviceProperties() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cout << " Device Number: " << i << endl;
cout << " Device name: " << prop.name << endl;
cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl;
cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;;
cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl;
}
}
/* Prints an output file for checking results */
void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) {
int devicePosX = devicePosition_X;
int devicePosY = devicePosition_Y;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//One complete row across all GPUs is dim; to get the element directly above an element we add (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//cout << endl;
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
result[pos] = partial_result[indexCounter];
indexCounter++;
}
//cout << endl;
rowStartPos += dim;
rowEndPos = rowStartPos + chunk_X;
}
//Printing when the last device computation is done: remove the check to inspect the computation for each device
int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X);
if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1)))
{
ofstream myfile;
myfile.open("data2.txt");
//Printing the values here
for (int i = totalSize; i > 0; i--) {
if (i%dim == 0) {
myfile << endl;
}
myfile << result[i - 1] << " ";
}
myfile.close();
}
}
cudaError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in)
{
//Default value; overridden below when val_dim is non-zero
int dim = 8;
if (val_dim != 0) {
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Get the total number of devices
int numDevices = -1;
cudaGetDeviceCount(&numDevices);
//numDevices = 2;
//Set Decomposition dimension 1D or 2D: when decomposition is 0, computation happens on a single GPU
int decom_Dim = 2;
//Set Values for Domain Decomposition type 1D or 2D
int domainDecom_Dim = decom_Dim;
//Read the custom config defined in file "multiGPUConfig.xml"
getConfiguration("multiGPUConfig.xml", numDevices, domainDecom_Dim);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development
bool p2penabled = false;
p2penabled = enableP2P(numDevices);
//Configuring the number of GPUs manually
//numDevices=2;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_Device> deviceArray;
/* Distributed Computation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition nhalo and shalo are initialized from vector x_in
1.b) In 2D decomposition nhalo, shalo, ehalo and whalo are initialized from vector x_in
2. Pass the halos to the Jacobi kernel.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decomposition. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
//=================================Domain Decomposition Logic Starts=================================================================
/*Generating a GPU Grid with multiple GPUs and creating a Topology*/
int numberOfDevicesAlong_X = 1;
int numberOfDevicesAlong_Y = 1;
generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl;
//Total elements along each dim in 2D
int chunk_X = dim / numberOfDevicesAlong_X;
int chunk_Y = dim / numberOfDevicesAlong_Y;
/* Creating a GPU topology with multiple devices*/
createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
//Let the total number of GPUs be 2: has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
std::vector<int> domainDivision(numDevices);
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//Chunk per GPU will be the same irrespective of 1D or 2D decomposition
domainDivision[i] = size / numDevices;
}
//For use on Device
std::vector<float*>d_A0(numDevices);
std::vector<float*>d_A1(numDevices);
std::vector<float*>d_A2(numDevices);
std::vector<float*>d_A3(numDevices);
std::vector<float*>d_A4(numDevices);
std::vector<float*>d_Vec_In(numDevices);
std::vector<float*>d_Vec_Out(numDevices);
std::vector<float*>d_nhalos(numDevices);
std::vector<float*>d_shalos(numDevices);
std::vector<float*>d_ehalos(numDevices);
std::vector<float*>d_whalos(numDevices);
std::vector<float*>d_Rhs(numDevices);
//Device Buffers for parallel communication using streams: Concept of Front and Back Buffer Oct 30, 2017
std::vector<float*>x_buffer_north(numDevices);
std::vector<float*>x_buffer_south(numDevices);
std::vector<float*>y_buffer_west(numDevices);
std::vector<float*>y_buffer_east(numDevices);
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
vector<float*>nHalo_pinned(numDevices);
vector<float*>sHalo_pinned(numDevices);
vector<float*>wHalo_pinned(numDevices);
vector<float*>eHalo_pinned(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaMallocHost((void**)&nHalo_pinned[dev], (chunk_X) * sizeof(float));
cudaMallocHost((void**)&sHalo_pinned[dev], (chunk_X) * sizeof(float));
cudaMallocHost((void**)&wHalo_pinned[dev], (chunk_Y) * sizeof(float));
cudaMallocHost((void**)&eHalo_pinned[dev], (chunk_Y) * sizeof(float));
}
for (int dev = 0; dev < numDevices; dev++)
{
//Setting the device before allocation
cudaSetDevice(dev);
//cudamalloc the Diagonals
cudaMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float));
//Using pinned memory as part of performance upgrade- Phase 2 of development
//cudamalloc the Input Vector and Result vector
cudaMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float));
//cudaMalloc Halos: North and South--1D. TODO: East and West for 2D
cudaMalloc((void**)&d_nhalos[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&d_shalos[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&d_ehalos[dev], chunk_Y * sizeof(float));
cudaMalloc((void**)&d_whalos[dev], chunk_Y * sizeof(float));
//Buffer memory used for p2p exchange
cudaMalloc((void**)&x_buffer_north[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&x_buffer_south[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&y_buffer_west[dev], chunk_Y * sizeof(float));
cudaMalloc((void**)&y_buffer_east[dev], chunk_Y * sizeof(float));
}
/* The transfer of Data from Host to Device : Domain Decomposition in 2D*/
if (decom_Dim == 2) {
//Create Partial Diagonal Vectors
//Size per GPU will be chunk_X * chunk_Y elements
int chunkSize = chunk_X * chunk_Y;
std::vector<float> partial_a0(chunkSize);
std::vector<float> partial_a1(chunkSize);
std::vector<float> partial_a2(chunkSize);
std::vector<float> partial_a3(chunkSize);
std::vector<float> partial_a4(chunkSize);
std::vector<float> partial_vec_in(chunkSize);
std::vector<float> partial_vec_out(chunkSize);
std::vector<float> partial_rhs(chunkSize);
std::vector<float> partial_result(chunkSize);
for (int dev = 0; dev < numDevices; dev++)
{
//Test the properties of the device assigned
//cout << endl << "New Logical Device created " << deviceArray[dev].deviceID;
//cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")";
//==========Important: Logic for creation of Chunks to be allocated to GPUs==========================================
//Important: note the correlation between the topology and the data position (discussed in the thesis)
int devicePosX = deviceArray[dev].devicePosition_X;
int devicePosY = deviceArray[dev].devicePosition_Y;
//cout << endl << "For Device ID " << deviceArray[dev].deviceID << endl;
//cout << endl << "Device pos X " << devicePosX << endl;
//cout << endl << "Device pos Y " << devicePosY << endl;
//cout << endl << "Chunk X " << chunk_X << endl;
//cout << endl << "Chunk Y " << chunk_Y << endl;
//cout << endl << "Number of device along X " << numberOfDevicesAlong_X << endl;
//cout << endl << "Number of device along Y " << numberOfDevicesAlong_Y << endl;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
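//devicePosY * dim * chunk_Y skips every global row owned by the GPU rows above this device, and devicePosX * chunk_X then moves to this device's first column within its own row of GPUs.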
int dataEndPos_X = dataStartPos_X + chunk_X;
//cout << endl << "Data Start Pos is " << dataStartPos_X << endl;
//cout << endl << "Data End Pos is " << dataEndPos_X << endl;
//One complete row across all GPUs has length dim, so to get the element directly above the current one we add dim to the current position (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//Initialize Halos
initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//cout << endl << "Data Start Pos is " << rowStartPos << endl;
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
partial_a0[indexCounter] = a0[pos];
partial_a1[indexCounter] = a1[pos];
partial_a2[indexCounter] = a2[pos];
partial_a3[indexCounter] = a3[pos];
partial_a4[indexCounter] = a4[pos];
partial_vec_in[indexCounter] = vec_in[pos];
partial_vec_out[indexCounter] = vec_out[pos];
partial_rhs[indexCounter] = rhs[pos];
partial_result[indexCounter] = result[pos];
indexCounter++;
}
//cout << endl << "Data End Pos is " << rowEndPos << endl;
rowStartPos += dim;
rowEndPos = rowStartPos+chunk_X;
}
//==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ==========================================
//Setting Cuda device
cudaSetDevice(dev);
//Copy the diagonals from host to device : calling all at once instead of putting inside the for loop
cudaMemcpy(d_A0[dev], &partial_a0[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A1[dev], &partial_a1[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A2[dev], &partial_a2[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A3[dev], &partial_a3[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A4[dev], &partial_a4[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
//Copy in and out vectors and RHS
cudaMemcpy(d_Vec_In[dev], &partial_vec_in[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Vec_Out[dev], &partial_vec_out[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Rhs[dev], &partial_rhs[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
}
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 1: " << cudaGetErrorString(err) << endl;
return err;
}
//Copy initial Halos in 2D
//Initial halo exchange on the host, then do the initial cudaMemcpy calls
exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaMemcpy(d_ehalos[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpy(d_whalos[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
}
if (auto err = cudaGetLastError())
{
cout << "Halo Copy Failed " << cudaGetErrorString(err) << endl;
return err;
}
//Development phase 2 changes: initialize the buffers used for p2p communication
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
//cout << "Device ID for nHaloFlag is : " << deviceArray[dev].deviceID<<endl;
cudaMemcpy(x_buffer_north[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
//cout << "Device ID for sHaloFlag is : " << deviceArray[dev].deviceID << endl;
cudaMemcpy(x_buffer_south[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
//cout << "Device ID for eHaloFlag is : " << deviceArray[dev].deviceID << endl;
cudaMemcpy(y_buffer_east[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
//cout << "Device ID for wHaloFlag is : " << deviceArray[dev].deviceID << endl;
cudaMemcpy(y_buffer_west[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
}
}
//=================================Domain Decomposition Logic Ends =================================================================
//=================================Setting up the grids and blocks for kernel launch================================================
//int blocksize = -1;
//int threads = -1;
int2 myDim;
myDim.x = chunk_X;
myDim.y = chunk_Y;
dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 grid(DIVRND(myDim.x, BLOCKSIZE_X), DIVRND(myDim.y, BLOCKSIZE_Y));
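//Note (assumption): DIVRND is taken to be integer ceiling division, i.e. DIVRND(n, b) == (n + b - 1) / b, so the grid always covers the full chunk_X x chunk_Y chunk even when the chunk size is not a multiple of the block size.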
//==================================================================================================================================
//Call to the kernel
int iterations = 0;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
else
{
cout << endl << " No. of iterations is zero, exiting... ";
return cudaErrorInvalidValue;
}
//===========================================CUDA Stream implementation for performance. Phase 2 of Development ====================================================
//===========Algorithm improvement: identify neighbouring devices so that their kernels can be launched together and the halo exchange can take place without waiting for computation across all devices============================
//cudaStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors.
vector<cudaStream_t> streams(numDevices);
//Create separate streams for each Halo Exchange
vector<cudaStream_t> nHaloExchange(numDevices);
vector<cudaStream_t> sHaloExchange(numDevices);
vector<cudaStream_t> eHaloExchange(numDevices);
vector<cudaStream_t> wHaloExchange(numDevices);
//cudaStream_t nHaloExchange[4];
//cudaStream_t sHaloExchange[4];
//cudaStream_t eHaloExchange[4];
//cudaStream_t wHaloExchange[4];
//Note: The default stream for a device is always synchronizing, so separate streams are created for each device
for (int i = 0; i < numDevices; i++)
{
cudaSetDevice(i);
cudaStreamCreate(&streams[i]);
if (p2penabled) {
cudaStreamCreate(&nHaloExchange[i]);
cudaStreamCreate(&sHaloExchange[i]);
cudaStreamCreate(&eHaloExchange[i]);
cudaStreamCreate(&wHaloExchange[i]);
}
}
//For explicitly synchronizing p2p transfers and async memcopies
//cudaEvent_t events[4];
vector<cudaEvent_t> events(numDevices);
vector<cudaEvent_t> nHaloEvent(numDevices);
vector<cudaEvent_t> sHaloEvent(numDevices);
vector<cudaEvent_t> eHaloEvent(numDevices);
vector<cudaEvent_t> wHaloEvent(numDevices);
//cudaEvent_t nHaloEvent[4];
//cudaEvent_t sHaloEvent[4];
//cudaEvent_t eHaloEvent[4];
//cudaEvent_t wHaloEvent[4];
for (int i = 0; i < numDevices; i++)
{
cudaSetDevice(i);
cudaEventCreate(&events[i]);
if (p2penabled) {
cudaEventCreate(&nHaloEvent[i]);
cudaEventCreate(&sHaloEvent[i]);
cudaEventCreate(&eHaloEvent[i]);
cudaEventCreate(&wHaloEvent[i]);
}
}
/*Using pageable memory first*/
//std::vector<float> partial_resultOnHost(chunk_X * chunk_Y);
/*Using a pinned(page locked) memory for performance*/
vector<float*>partial_resultOnHost(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaMallocHost((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float));
}
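//Pinned (page-locked) host buffers let the final cudaMemcpyAsync of the results overlap with the remaining work on the other devices.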
//Pointer copies of the halo buffers for OMP data race prevention (stored as vectors of pointers)
vector<float*> nhalo_ptr(numDevices);
vector<float*> shalo_ptr(numDevices);
vector<float*> ehalo_ptr(numDevices);
vector<float*> whalo_ptr(numDevices);
vector<float*> x_buffer_north_ptr_write(numDevices);
vector<float*> x_buffer_south_ptr_write(numDevices);
vector<float*> y_buffer_east_ptr_write(numDevices);
vector<float*> y_buffer_west_ptr_write(numDevices);
//Initialize the duplicate pointers to the current device halo buffers
for (int dev = 0; dev < numDevices; dev++)
{
//For OMP thread safety
nhalo_ptr[dev] = d_nhalos[dev];
shalo_ptr[dev] = d_shalos[dev];
ehalo_ptr[dev] = d_ehalos[dev];
whalo_ptr[dev] = d_whalos[dev];
x_buffer_north_ptr_write[dev]=x_buffer_north[dev];
x_buffer_south_ptr_write[dev]=x_buffer_south[dev];
y_buffer_east_ptr_write[dev]= y_buffer_east[dev];
y_buffer_west_ptr_write[dev]= y_buffer_west[dev];
}
//==============================================================
//Check performance
cudaError_t status = cudaGetLastError();
high_resolution_clock::time_point t1 = high_resolution_clock::now();
#pragma omp parallel num_threads(numDevices)
{
int dev = omp_get_thread_num();
//cudaSetDevice(omp_get_thread_num());
for (int i = 0; i < iterations; i++)
{
cudaSetDevice(dev);
#pragma omp barrier
if ((i>0))
{
//Since this is not run on a single host thread, race conditions can occur, so the pointer swapping has to be managed by creating a copy
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
swap(x_buffer_north[dev], d_nhalos[dev]);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
swap(x_buffer_south[dev], d_shalos[dev]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
swap(y_buffer_east[dev], d_ehalos[dev]);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
swap(y_buffer_west[dev], d_whalos[dev]);
}
}
jacobi_Simple << <grid, block, 0, streams[dev] >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim, myDim);
//Record an event so that the halo-exchange streams can wait for this iteration's kernel to finish
cudaEventRecord(events[dev], streams[dev]);
if (i == (iterations - 1))//Copy the results just for the final iteration
{
cudaMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]);
continue;
}
swap(d_Vec_In[dev], d_Vec_Out[dev]);
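//Ping-pong the solution buffers: the output of this iteration becomes the input of the next one on this device.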
//Store Halo positions after iteration for exchanging
if (!p2penabled) {
if (numDevices > 1)
{
if (deviceArray[dev].nHalo_flag == 1)
{
cudaStreamWaitEvent(nHaloExchange[dev], events[dev], 0);
cudaMemcpyAsync(nHalo_pinned[dev], d_nhalos[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToHost, nHaloExchange[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_nhalos copy failed D2H: " << cudaGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaStreamWaitEvent(sHaloExchange[dev], events[dev], 0);
cudaMemcpyAsync(sHalo_pinned[dev], d_shalos[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToHost, sHaloExchange[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_shalos copy failed D2H: " << cudaGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaStreamWaitEvent(eHaloExchange[dev], events[dev], 0);
cudaMemcpyAsync(eHalo_pinned[dev], d_ehalos[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToHost, eHaloExchange[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_ehalos copy failed D2H: " << cudaGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaStreamWaitEvent(wHaloExchange[dev], events[dev], 0);
cudaMemcpyAsync(wHalo_pinned[dev], d_whalos[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToHost, wHaloExchange[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_whalos copy failed D2H " << cudaGetErrorString(err) << endl;
//return err;
}
}
}
}
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 2: " << cudaGetErrorString(err) << endl;
//return err;
}
//Exchange Halos after each iteration except the last iteration
if ((i < (iterations - 1)))
{
//cudaStreamSynchronize(streams[dev]);
//cudaDeviceSynchronize();
if ((!p2penabled)) {
if (auto err = cudaGetLastError())
{
cout << "Stream " << dev << " synchronize error for iteration : " << i << ". ERROR IS: " << cudaGetErrorString(err) << endl;
//return err;
}
bool exchangeComplete = false;
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
//exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
exchangeComplete = exchangehalos_onHostPinned(numDevices, deviceArray, numberOfDevicesAlong_X, nHalo_pinned, sHalo_pinned, eHalo_pinned, wHalo_pinned);
if (exchangeComplete) {
for (int dev = 0; dev < numDevices; dev++)
{
//Swap input output vectors for all devices
swap(d_Vec_In[dev], d_Vec_Out[dev]);
cudaSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpyAsync(d_nhalos[dev], nHalo_pinned[dev], chunk_X * sizeof(float), cudaMemcpyHostToDevice, nHaloExchange[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_nhalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaMemcpyAsync(d_shalos[dev], sHalo_pinned[dev], chunk_X * sizeof(float), cudaMemcpyHostToDevice, sHaloExchange[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_shalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaMemcpyAsync(d_ehalos[dev], eHalo_pinned[dev], chunk_Y * sizeof(float), cudaMemcpyHostToDevice, eHaloExchange[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_ehalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpyAsync(d_whalos[dev], wHalo_pinned[dev], chunk_Y * sizeof(float), cudaMemcpyHostToDevice, wHaloExchange[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_whalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
}
}
}
else {
//============Important: Before copying to buffers make sure the kernel on the respective GPU(s) finished execution using cudaStreamWaitEvent=======================
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
nhalo_ptr[dev] = d_nhalos[dev];
shalo_ptr[dev] = d_shalos[dev];
ehalo_ptr[dev] = d_ehalos[dev];
whalo_ptr[dev] = d_whalos[dev];
x_buffer_north_ptr_write[dev] = x_buffer_north[dev];
x_buffer_south_ptr_write[dev] = x_buffer_south[dev];
y_buffer_east_ptr_write[dev] = y_buffer_east[dev];
y_buffer_west_ptr_write[dev] = y_buffer_west[dev];
#pragma omp barrier // Important: To make sure all threads assign proper values to duplicate pointers before Halo Exchange Begins
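//P2P exchange pattern: each device pushes its freshly computed halo directly into the neighbour's receive buffer (e.g. this device's north halo lands in the south buffer of the device above it); the buffers are swapped into d_*halos at the start of the next iteration (see the swap block above).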
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
cudaSetDevice(dev);
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y+1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(nHaloExchange[dev], events[dev], 0);
cudaMemcpyPeerAsync(x_buffer_south[devIDtoNorth], devIDtoNorth, d_nhalos[dev], dev, chunk_X * sizeof(float), nHaloExchange[dev]);
cudaEventRecord(nHaloEvent[dev], nHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
cudaSetDevice(devIDtoNorth);
cudaStreamWaitEvent(streams[devIDtoNorth], nHaloEvent[dev], 0);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
cudaSetDevice(dev);
int devIDtoSouth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y-1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(sHaloExchange[dev], events[dev], 0);
cudaMemcpyPeerAsync(x_buffer_north[devIDtoSouth], devIDtoSouth, d_shalos[dev], dev, chunk_X * sizeof(float), sHaloExchange[dev]);
cudaEventRecord(sHaloEvent[dev], sHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
cudaSetDevice(devIDtoSouth);
cudaStreamWaitEvent(streams[devIDtoSouth], sHaloEvent[dev], 0);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
cudaSetDevice(dev);
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X+1, getDevCoord_Y, numberOfDevicesAlong_Y);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(eHaloExchange[dev], events[dev], 0);
cudaMemcpyPeerAsync(y_buffer_west[devIDtoEast], devIDtoEast, d_ehalos[dev], dev, chunk_Y * sizeof(float), eHaloExchange[dev]);
cudaEventRecord(eHaloEvent[dev], eHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
cudaSetDevice(devIDtoEast);
cudaStreamWaitEvent(streams[devIDtoEast], eHaloEvent[dev], 0);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
cudaSetDevice(dev);
int devIDtoWest = getDeviceIDfromCoord(getDevCoord_X-1, getDevCoord_Y, numberOfDevicesAlong_Y);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(wHaloExchange[dev], events[dev], 0);
cudaMemcpyPeerAsync(y_buffer_east[devIDtoWest], devIDtoWest, d_whalos[dev], dev, chunk_Y * sizeof(float), wHaloExchange[dev]);
cudaEventRecord(wHaloEvent[dev], wHaloExchange[dev]);
//Postpone the next iteration kernel execution till the p2p transfers complete
cudaSetDevice(devIDtoWest);
cudaStreamWaitEvent(streams[devIDtoWest], wHaloEvent[dev], 0);
}
/*if (auto err = cudaGetLastError())
{
cout << "Halo Exchange Error: " << cudaGetErrorString(err) << endl;
}*/
}
}
}
}
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 3: " << cudaGetErrorString(err) << endl;
return err;
}
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl;
//Sync and Destroy streams and events
for (int i = 0; i < numDevices; ++i)
{
cudaSetDevice(i);
//Destroy Events
cudaEventDestroy(events[i]);
cudaEventDestroy(nHaloEvent[i]);
cudaEventDestroy(sHaloEvent[i]);
cudaEventDestroy(eHaloEvent[i]);
cudaEventDestroy(wHaloEvent[i]);
//Synchronize and destroy the streams
cudaStreamSynchronize(streams[i]);
cudaStreamDestroy(streams[i]);
cudaStreamSynchronize(nHaloExchange[i]);
cudaStreamDestroy(nHaloExchange[i]);
cudaStreamSynchronize(sHaloExchange[i]);
cudaStreamDestroy(sHaloExchange[i]);
cudaStreamSynchronize(eHaloExchange[i]);
cudaStreamDestroy(eHaloExchange[i]);
cudaStreamSynchronize(wHaloExchange[i]);
cudaStreamDestroy(wHaloExchange[i]);
}
//Results copied to disk
for (int dev = 0; dev < numDevices; dev++)
{
sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations);
}
//==========================================Performance using CUDA stream ends===========================================================================
//Done in phase 2 of development: Disable P2P across devices
if (p2penabled) {
disableP2P(numDevices);
}
//Free memory on device
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaFree(d_A0[dev]);
cudaFree(d_A1[dev]);
cudaFree(d_A2[dev]);
cudaFree(d_A3[dev]);
cudaFree(d_A4[dev]);
cudaFree(d_Vec_In[dev]);
cudaFree(d_Vec_Out[dev]);
cudaFree(d_nhalos[dev]);
cudaFree(d_shalos[dev]);
cudaFree(d_ehalos[dev]);
cudaFree(d_whalos[dev]);
cudaFree(d_Rhs[dev]);
cudaFree(x_buffer_south[dev]);
cudaFree(x_buffer_north[dev]);
cudaFree(y_buffer_west[dev]);
cudaFree(y_buffer_east[dev]);
cudaFreeHost(partial_resultOnHost[dev]);
cudaFreeHost(nHalo_pinned[dev]);
cudaFreeHost(sHalo_pinned[dev]);
cudaFreeHost(wHalo_pinned[dev]);
cudaFreeHost(eHalo_pinned[dev]);
cudaDeviceReset();
}
cout << endl << "Device Memory free successful." << endl;
//domainDivision is a std::vector, so no manual deallocation is needed here
return cudaSuccess;
}
int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in)
{
cudaError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
if (cudaStatus != cudaSuccess) {
cout << "Computation failed: " << endl;
return 1;
}
return 0;
}
|
b08bb287b7ebd1f8934a15d87052964bdab724fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/device_vector.h>
#include <gmock/gmock.h>
struct DispatcherTest : public cudf::test::BaseFixture {};
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {};
TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected>
struct type_tester {
template <typename Dispatched>
bool operator()() {
return std::is_same<Expected, Dispatched>::value;
}
};
} // namespace
TYPED_TEST(TypedDispatcherTest, TypeToId) {
EXPECT_TRUE(cudf::experimental::type_dispatcher(
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()},
type_tester<TypeParam>{}));
}
namespace {
struct verify_dispatched_type {
template <typename T>
__host__ __device__ bool operator()(cudf::type_id id) {
return id == cudf::experimental::type_to_id<T>();
}
};
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result) {
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::experimental::type_dispatcher(cudf::data_type{id},
verify_dispatched_type{}, id);
}
} // namespace
TYPED_TEST(TypedDispatcherTest, DeviceDispatch) {
thrust::device_vector<bool> result(1, false);
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, cudf::experimental::type_to_id<TypeParam>(),
result.data().get());
hipDeviceSynchronize();
EXPECT_EQ(true, result[0]);
}
struct IdDispatcherTest : public DispatcherTest,
public testing::WithParamInterface<cudf::type_id> {};
INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest,
testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDispatcherTest, IdToType) {
auto t = GetParam();
EXPECT_TRUE(cudf::experimental::type_dispatcher(cudf::data_type{t},
verify_dispatched_type{}, t));
}
|
b08bb287b7ebd1f8934a15d87052964bdab724fa.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/device_vector.h>
#include <gmock/gmock.h>
struct DispatcherTest : public cudf::test::BaseFixture {};
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {};
TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected>
struct type_tester {
template <typename Dispatched>
bool operator()() {
return std::is_same<Expected, Dispatched>::value;
}
};
} // namespace
TYPED_TEST(TypedDispatcherTest, TypeToId) {
EXPECT_TRUE(cudf::experimental::type_dispatcher(
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()},
type_tester<TypeParam>{}));
}
namespace {
struct verify_dispatched_type {
template <typename T>
__host__ __device__ bool operator()(cudf::type_id id) {
return id == cudf::experimental::type_to_id<T>();
}
};
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result) {
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::experimental::type_dispatcher(cudf::data_type{id},
verify_dispatched_type{}, id);
}
} // namespace
TYPED_TEST(TypedDispatcherTest, DeviceDispatch) {
thrust::device_vector<bool> result(1, false);
dispatch_test_kernel<<<1, 1>>>(cudf::experimental::type_to_id<TypeParam>(),
result.data().get());
cudaDeviceSynchronize();
EXPECT_EQ(true, result[0]);
}
struct IdDispatcherTest : public DispatcherTest,
public testing::WithParamInterface<cudf::type_id> {};
INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest,
testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDispatcherTest, IdToType) {
auto t = GetParam();
EXPECT_TRUE(cudf::experimental::type_dispatcher(cudf::data_type{t},
verify_dispatched_type{}, t));
}
|
4ad74bd40e353317ceddcce04d36223bdacfde3b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/flatten_op.h"
#include "caffe2/operators/minmax_ops.h"
#include "caffe2/operators/utility_ops.h"
#include "caffe2/utils/math.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/unique.h>
namespace caffe2 {
template <>
bool WeightedSumOp<CUDAContext>::RunOnDevice() {
if (Input(0).IsType<float>()) {
return DoRunWithType<float>();
} else if (Input(0).IsType<at::Half>()) {
return DoRunWithType<at::Half>();
} else {
CAFFE_THROW("Unsupported inputs");
}
return false;
}
template <>
bool SumOp<CUDAContext>::RunOnDevice() {
if (Input(0).IsType<float>()) {
return DoRunWithType<float, float>();
} else if (Input(0).IsType<at::Half>()) {
return DoRunWithType<at::Half, at::Half>();
} else {
CAFFE_THROW("Unsupported inputs");
}
return false;
}
REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>);
CAFFE_KNOWN_TYPE(const float*);
REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>);
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
bool has_nan = false;
CUDA_1D_KERNEL_LOOP(i, N) {
// Note: we have no need to do early return, since only if this fails
// will we not need to inspect all elements. No need to optimize the
// case that will fail.
has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
}
__syncthreads();
if (has_nan) {
result[0] = true;
}
}
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
const size_t N = X.numel();
const float* data_ptr = X.data<float>();
ReinitializeTensor(&scratch_, {1}, at::dtype<bool>().device(CUDA));
math::Set<bool, CUDAContext>(
1, false, scratch_.mutable_data<bool>(), &context_);
hipLaunchKernelGGL(( NanCheckKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, X.data<float>(), scratch_.mutable_data<bool>());
bool result = false;
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CUDA_ENFORCE(hipMemcpyAsync(
&result,
scratch_.raw_data(),
1,
hipMemcpyDefault,
context_.cuda_stream()));
}
// Note: we must synchronize here so we can inspect the result
context_.FinishDeviceComputation();
// Print out diagnostic info if we have a NaN or inf
if (result) {
std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0)
<< std::endl;
for (int j = 0; j < InputSize(); j++) {
Tensor cpu_X(CPU);
cpu_X.ResizeLike(Input(j));
// Hack to cause allocation to happen here, so it won't happen
// when we do CopyFrom. We need the mutex then because host->gpu
// copies seem to possibly lock with NCCL.
cpu_X.mutable_data<float>();
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
cpu_X.CopyFrom(Input(j)); // sync copy
}
std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j)
<< "]" << std::endl;
tensorPrinter_.Print<float>(cpu_X);
if (j == 0) {
std::cerr << "NaN idxs:" << std::endl;
auto* cpu_X_data = cpu_X.data<float>();
for (size_t i = 0; i < cpu_X.numel(); ++i) {
if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) {
std::cerr << i << " ";
}
}
}
std::cerr << std::endl;
}
return false;
}
// This op should act as an identity matrix if we don't find any NaNs/infs.
// Copy over the data if we are not doing this in-place.
if (&X != Y) {
Y->CopyFrom(X, true /*async*/);
}
return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
__global__ void
ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) {
CUDA_1D_KERNEL_LOOP(i, N) {
maxout[i] = fmaxf(X[i], Y[i]);
}
}
template <>
bool MaxOp<float, CUDAContext>::Compute() {
float* output_data = Output(0)->template mutable_data<float>();
const int N = Input(0).numel();
// Run pairwise-maxes
for (int i = 1; i < InputSize(); ++i) {
hipLaunchKernelGGL(( ElwiseMaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
(i == 0 ? Input(0).data<float>() : Output(0)->data<float>()),
Input(i).data<float>(),
output_data,
N);
}
return true;
}
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>);
__global__ void
ElwiseMinKernel(const float* X, const float* Y, float* minout, const int N) {
CUDA_1D_KERNEL_LOOP(i, N) {
minout[i] = fminf(X[i], Y[i]);
}
}
template <>
bool MinOp<float, CUDAContext>::Compute() {
float* output_data = Output(0)->template mutable_data<float>();
const int N = Input(0).numel();
// Run pairwise-mins
for (int i = 1; i < InputSize(); ++i) {
hipLaunchKernelGGL(( ElwiseMinKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
(i == 0 ? Input(0).data<float>() : Output(0)->data<float>()),
Input(i).data<float>(),
output_data,
N);
}
return true;
}
REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>);
template <typename T>
__global__ void
MaxMinGradKernel(int N, const T* mx, const T* x, const T* go, T* gi) {
CUDA_1D_KERNEL_LOOP(i, N) {
gi[i] = go[i] * (mx[i] == x[i]);
}
}
template <>
bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() {
auto& output = Input(0);
auto& grad_output = Input(1);
const int kInputStartOffset = 2;
const float* data = output.data<float>();
for (int i = 0; i < OutputSize(); i++) {
auto& input = Input(i + kInputStartOffset);
auto* grad_input = Output(i, input.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( MaxMinGradKernel),
dim3(CAFFE_GET_BLOCKS(input.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input.numel(),
output.data<float>(),
input.data<float>(),
grad_output.data<float>(),
grad_input->template mutable_data<float>());
}
return true;
}
/**
* @brief Update slices of Y in-place with a batch of weighted X's.
* Y[idx] = alpha[b] * X[b][i] + Y[idx]
* i=0,...,N-1
* b=0,...,B-1
* idx=Indices[i]
*/
template <typename T_INDEX>
__global__ void AxpySliceKernel(
const float* weight0,
const int64_t N,
const int64_t B,
const int64_t slice_size,
const float** alpha,
const float** X,
const T_INDEX* Indices,
float* Y,
const int64_t M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int b = 0; b < B; b++) {
float a = *alpha[b];
const float* x_offset = X[b] + (i * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], a * x_offset[j]);
}
}
}
}
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
template <>
template <typename Index>
bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() {
CAFFE_ENFORCE_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
CAFFE_ENFORCE_GT(X0.numel(), 0);
CAFFE_ENFORCE_GT(X0.dim(), 0, "X0 has to be at least the vector");
CAFFE_ENFORCE_EQ(weight0.numel(), 1);
int64_t M = X0.numel();
int64_t N = X0.dim(0);
int64_t K = indices.numel();
int64_t block_size = M / N;
float* data = output->template mutable_data<float>();
// In order to have all device pointers of x_i (and weight_i similarly)
// consecutively in device memory, copy pointers to a host vector and then
// copy back into a device array.
const int64_t B = (InputSize() - 3) / 2;
ReinitializeTensor(&x_data_host_, {B}, at::dtype<const float*>().device(CPU));
ReinitializeTensor(&weights_host_, {B}, at::dtype<const float*>().device(CPU));
ReinitializeTensor(&x_data_device_, {B}, at::dtype<const float*>().device(CUDA));
ReinitializeTensor(&weights_device_, {B}, at::dtype<const float*>().device(CUDA));
const float** x_data_host = x_data_host_.mutable_data<const float*>();
const float** weights_host = weights_host_.mutable_data<const float*>();
const float** x_data_device = x_data_device_.mutable_data<const float*>();
const float** weights_device = weights_device_.mutable_data<const float*>();
for (int inp = 3; inp < InputSize(); inp += 2) {
int idx = (inp - 3) / 2;
x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data());
weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data());
}
context_.Copy<const float*, CPUContext, CUDAContext>(
B, x_data_host, x_data_device);
context_.Copy<const float*, CPUContext, CUDAContext>(
B, weights_host, weights_device);
hipLaunchKernelGGL(( AxpySliceKernel),
dim3(std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
weight0.template data<float>(),
K,
B,
block_size,
weights_device,
x_data_device,
indices.template data<Index>(),
data,
M);
return true;
}
REGISTER_CUDA_OPERATOR(
ScatterWeightedSum,
ScatterWeightedSumOp<float, CUDAContext>);
namespace {
template <typename Index, typename T>
__global__ void scatter_assign_kernel(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
for (int64_t i = blockIdx.x; i < K; i += gridDim.x) {
Index idx = idxs[i];
CUDA_KERNEL_ASSERT(0 <= idx && idx < N);
const T* src = slicesData + block_size * i;
T* dest = data + block_size * idx;
for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) {
dest[j] = src[j];
}
}
}
} // namespace
template <>
template <typename Index, typename T>
void ScatterAssignOp<CUDAContext>::DoScatterAssign(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
hipLaunchKernelGGL(( scatter_assign_kernel),
dim3(::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), data, idxs, slicesData, N, K, block_size);
}
REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>);
template <typename T>
__global__ void RangeKernel(const int n, T* Y, T offset, T step) {
CUDA_1D_KERNEL_LOOP(index, n) {
Y[index] = index * step + offset;
}
}
template <>
template <typename T>
bool RangeOp<CUDAContext>::DoRunOnDevice(
const T& start,
const T& step,
Tensor* output) {
int N = output->numel();
hipLaunchKernelGGL(( RangeKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, output->template mutable_data<T>(), start, step);
return true;
}
REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>);
} // namespace caffe2
|
4ad74bd40e353317ceddcce04d36223bdacfde3b.cu
|
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/flatten_op.h"
#include "caffe2/operators/minmax_ops.h"
#include "caffe2/operators/utility_ops.h"
#include "caffe2/utils/math.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/unique.h>
namespace caffe2 {
template <>
bool WeightedSumOp<CUDAContext>::RunOnDevice() {
if (Input(0).IsType<float>()) {
return DoRunWithType<float>();
} else if (Input(0).IsType<at::Half>()) {
return DoRunWithType<at::Half>();
} else {
CAFFE_THROW("Unsupported inputs");
}
return false;
}
template <>
bool SumOp<CUDAContext>::RunOnDevice() {
if (Input(0).IsType<float>()) {
return DoRunWithType<float, float>();
} else if (Input(0).IsType<at::Half>()) {
return DoRunWithType<at::Half, at::Half>();
} else {
CAFFE_THROW("Unsupported inputs");
}
return false;
}
REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>);
CAFFE_KNOWN_TYPE(const float*);
REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>);
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
bool has_nan = false;
CUDA_1D_KERNEL_LOOP(i, N) {
// Note: we have no need to do early return, since only if this fails
// will we not need to inspect all elements. No need to optimize the
// case that will fail.
has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
}
__syncthreads();
if (has_nan) {
result[0] = true;
}
}
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
const size_t N = X.numel();
const float* data_ptr = X.data<float>();
ReinitializeTensor(&scratch_, {1}, at::dtype<bool>().device(CUDA));
math::Set<bool, CUDAContext>(
1, false, scratch_.mutable_data<bool>(), &context_);
NanCheckKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, X.data<float>(), scratch_.mutable_data<bool>());
bool result = false;
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CUDA_ENFORCE(cudaMemcpyAsync(
&result,
scratch_.raw_data(),
1,
cudaMemcpyDefault,
context_.cuda_stream()));
}
// Note: we must synchronize here so we can inspect the result
context_.FinishDeviceComputation();
// Print out diagnostic info if we have a NaN or inf
if (result) {
std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0)
<< std::endl;
for (int j = 0; j < InputSize(); j++) {
Tensor cpu_X(CPU);
cpu_X.ResizeLike(Input(j));
// Hack to cause allocation to happen here, so it won't happen
// when we do CopyFrom. We need the mutex then because host->gpu
// copies seem to possibly lock with NCCL.
cpu_X.mutable_data<float>();
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
cpu_X.CopyFrom(Input(j)); // sync copy
}
std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j)
<< "]" << std::endl;
tensorPrinter_.Print<float>(cpu_X);
if (j == 0) {
std::cerr << "NaN idxs:" << std::endl;
auto* cpu_X_data = cpu_X.data<float>();
for (size_t i = 0; i < cpu_X.numel(); ++i) {
if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) {
std::cerr << i << " ";
}
}
}
std::cerr << std::endl;
}
return false;
}
// This op should act as an identity matrix if we don't find any NaNs/infs.
// Copy over the data if we are not doing this in-place.
if (&X != Y) {
Y->CopyFrom(X, true /*async*/);
}
return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
__global__ void
ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) {
CUDA_1D_KERNEL_LOOP(i, N) {
maxout[i] = fmaxf(X[i], Y[i]);
}
}
template <>
bool MaxOp<float, CUDAContext>::Compute() {
float* output_data = Output(0)->template mutable_data<float>();
const int N = Input(0).numel();
// Run pairwise-maxes
for (int i = 1; i < InputSize(); ++i) {
ElwiseMaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
(i == 0 ? Input(0).data<float>() : Output(0)->data<float>()),
Input(i).data<float>(),
output_data,
N);
}
return true;
}
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>);
__global__ void
ElwiseMinKernel(const float* X, const float* Y, float* minout, const int N) {
CUDA_1D_KERNEL_LOOP(i, N) {
minout[i] = fminf(X[i], Y[i]);
}
}
template <>
bool MinOp<float, CUDAContext>::Compute() {
float* output_data = Output(0)->template mutable_data<float>();
const int N = Input(0).numel();
// Run pairwise-mins
for (int i = 1; i < InputSize(); ++i) {
ElwiseMinKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
(i == 0 ? Input(0).data<float>() : Output(0)->data<float>()),
Input(i).data<float>(),
output_data,
N);
}
return true;
}
REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>);
template <typename T>
__global__ void
MaxMinGradKernel(int N, const T* mx, const T* x, const T* go, T* gi) {
CUDA_1D_KERNEL_LOOP(i, N) {
gi[i] = go[i] * (mx[i] == x[i]);
}
}
template <>
bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() {
auto& output = Input(0);
auto& grad_output = Input(1);
const int kInputStartOffset = 2;
const float* data = output.data<float>();
for (int i = 0; i < OutputSize(); i++) {
auto& input = Input(i + kInputStartOffset);
auto* grad_input = Output(i, input.sizes(), at::dtype<float>());
MaxMinGradKernel<<<
CAFFE_GET_BLOCKS(input.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input.numel(),
output.data<float>(),
input.data<float>(),
grad_output.data<float>(),
grad_input->template mutable_data<float>());
}
return true;
}
/**
* @brief Update slices of Y in-place with a batch of weighted X's.
* Y[idx] = alpha[b] * X[b][i] + Y[idx]
* i=0,...,N-1
* b=0,...,B-1
* idx=Indices[i]
*/
template <typename T_INDEX>
__global__ void AxpySliceKernel(
const float* weight0,
const int64_t N,
const int64_t B,
const int64_t slice_size,
const float** alpha,
const float** X,
const T_INDEX* Indices,
float* Y,
const int64_t M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int b = 0; b < B; b++) {
float a = *alpha[b];
const float* x_offset = X[b] + (i * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], a * x_offset[j]);
}
}
}
}
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
template <>
template <typename Index>
bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() {
CAFFE_ENFORCE_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
CAFFE_ENFORCE_GT(X0.numel(), 0);
CAFFE_ENFORCE_GT(X0.dim(), 0, "X0 has to be at least the vector");
CAFFE_ENFORCE_EQ(weight0.numel(), 1);
int64_t M = X0.numel();
int64_t N = X0.dim(0);
int64_t K = indices.numel();
int64_t block_size = M / N;
float* data = output->template mutable_data<float>();
// In order to have all device pointers of x_i (and weight_i similarly)
// consecutively in device memory, copy pointers to a host vector and then
// copy back into a device array.
const int64_t B = (InputSize() - 3) / 2;
ReinitializeTensor(&x_data_host_, {B}, at::dtype<const float*>().device(CPU));
ReinitializeTensor(&weights_host_, {B}, at::dtype<const float*>().device(CPU));
ReinitializeTensor(&x_data_device_, {B}, at::dtype<const float*>().device(CUDA));
ReinitializeTensor(&weights_device_, {B}, at::dtype<const float*>().device(CUDA));
const float** x_data_host = x_data_host_.mutable_data<const float*>();
const float** weights_host = weights_host_.mutable_data<const float*>();
const float** x_data_device = x_data_device_.mutable_data<const float*>();
const float** weights_device = weights_device_.mutable_data<const float*>();
for (int inp = 3; inp < InputSize(); inp += 2) {
int idx = (inp - 3) / 2;
x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data());
weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data());
}
context_.Copy<const float*, CPUContext, CUDAContext>(
B, x_data_host, x_data_device);
context_.Copy<const float*, CPUContext, CUDAContext>(
B, weights_host, weights_device);
AxpySliceKernel<<<
std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
weight0.template data<float>(),
K,
B,
block_size,
weights_device,
x_data_device,
indices.template data<Index>(),
data,
M);
return true;
}
REGISTER_CUDA_OPERATOR(
ScatterWeightedSum,
ScatterWeightedSumOp<float, CUDAContext>);
namespace {
template <typename Index, typename T>
__global__ void scatter_assign_kernel(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
for (int64_t i = blockIdx.x; i < K; i += gridDim.x) {
Index idx = idxs[i];
CUDA_KERNEL_ASSERT(0 <= idx && idx < N);
const T* src = slicesData + block_size * i;
T* dest = data + block_size * idx;
for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) {
dest[j] = src[j];
}
}
}
} // namespace
template <>
template <typename Index, typename T>
void ScatterAssignOp<CUDAContext>::DoScatterAssign(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
scatter_assign_kernel<<<
std::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(data, idxs, slicesData, N, K, block_size);
}
REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>);
template <typename T>
__global__ void RangeKernel(const int n, T* Y, T offset, T step) {
CUDA_1D_KERNEL_LOOP(index, n) {
Y[index] = index * step + offset;
}
}
template <>
template <typename T>
bool RangeOp<CUDAContext>::DoRunOnDevice(
const T& start,
const T& step,
Tensor* output) {
int N = output->numel();
RangeKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, output->template mutable_data<T>(), start, step);
return true;
}
REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>);
} // namespace caffe2
|
194de83d955009551a6cf98dafd3eb94de0c2719.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/manifold/tsne.h>
#include <datasets/digits.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/common/logger.hpp>
#include <iostream>
#include <score/scores.cuh>
#include <vector>
using namespace MLCommon;
using namespace MLCommon::Score;
using namespace MLCommon::Distance;
using namespace MLCommon::Datasets::Digits;
using namespace ML;
class TSNETest : public ::testing::Test {
protected:
void basicTest() {
raft::handle_t handle;
// Allocate memory
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n * p);
MLCommon::updateDevice(X_d.data(), digits.data(), n * p,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
device_buffer<float> Y_d(handle.get_device_allocator(), handle.get_stream(),
n * 2);
// Test Barnes Hut
TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100,
1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1);
// Move embeddings to host.
// This can be used for printing if needed.
float *embeddings_h = (float *)malloc(sizeof(float) * n * 2);
assert(embeddings_h != NULL);
MLCommon::updateHost(&embeddings_h[0], Y_d.data(), n * 2,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
// Transpose the data
int k = 0;
float C_contiguous_embedding[n * 2];
for (int i = 0; i < n; i++) {
for (int j = 0; j < 2; j++)
C_contiguous_embedding[k++] = embeddings_h[j * n + i];
}
// Move transposed embeddings back to device, as trustworthiness requires C contiguous format
MLCommon::updateDevice(Y_d.data(), C_contiguous_embedding, n * 2,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
// Test trustworthiness
score_bh =
trustworthiness_score<float,
ML::Distance::DistanceType::EucUnexpandedL2Sqrt>(
X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(),
handle.get_stream());
// Test Exact TSNE
TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100,
1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1,
CUML_LEVEL_INFO, false, false);
MLCommon::updateHost(&embeddings_h[0], Y_d.data(), n * 2,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
// Move embeddings to host.
// This can be used for printing if needed.
k = 0;
for (int i = 0; i < n; i++) {
for (int j = 0; j < 2; j++)
C_contiguous_embedding[k++] = embeddings_h[j * n + i];
}
// Move transposed embeddings back to device, as trustworthiness requires C contiguous format
MLCommon::updateDevice(Y_d.data(), C_contiguous_embedding, n * 2,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
// Test trustworthiness
score_exact =
trustworthiness_score<float,
ML::Distance::DistanceType::EucUnexpandedL2Sqrt>(
X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(),
handle.get_stream());
// Free space
free(embeddings_h);
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
int n = 1797;
int p = 64;
double score_bh;
double score_exact;
};
typedef TSNETest TSNETestF;
TEST_F(TSNETestF, Result) {
if (score_bh < 0.98) CUML_LOG_DEBUG("BH score = %f", score_bh);
if (score_exact < 0.98) CUML_LOG_DEBUG("Exact score = %f", score_exact);
ASSERT_TRUE(0.98 < score_bh && 0.98 < score_exact);
}
|
194de83d955009551a6cf98dafd3eb94de0c2719.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/manifold/tsne.h>
#include <datasets/digits.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/common/logger.hpp>
#include <iostream>
#include <score/scores.cuh>
#include <vector>
using namespace MLCommon;
using namespace MLCommon::Score;
using namespace MLCommon::Distance;
using namespace MLCommon::Datasets::Digits;
using namespace ML;
class TSNETest : public ::testing::Test {
protected:
void basicTest() {
raft::handle_t handle;
// Allocate memory
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n * p);
MLCommon::updateDevice(X_d.data(), digits.data(), n * p,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
device_buffer<float> Y_d(handle.get_device_allocator(), handle.get_stream(),
n * 2);
// Test Barnes Hut
TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100,
1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1);
// Move embeddings to host.
// This can be used for printing if needed.
float *embeddings_h = (float *)malloc(sizeof(float) * n * 2);
assert(embeddings_h != NULL);
MLCommon::updateHost(&embeddings_h[0], Y_d.data(), n * 2,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
// Transpose the data
int k = 0;
float C_contiguous_embedding[n * 2];
for (int i = 0; i < n; i++) {
for (int j = 0; j < 2; j++)
C_contiguous_embedding[k++] = embeddings_h[j * n + i];
}
// Move transposed embeddings back to device, as trustworthiness requires C contiguous format
MLCommon::updateDevice(Y_d.data(), C_contiguous_embedding, n * 2,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
// Test trustworthiness
score_bh =
trustworthiness_score<float,
ML::Distance::DistanceType::EucUnexpandedL2Sqrt>(
X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(),
handle.get_stream());
// Test Exact TSNE
TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100,
1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1,
CUML_LEVEL_INFO, false, false);
MLCommon::updateHost(&embeddings_h[0], Y_d.data(), n * 2,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
// Move embeddings to host.
// This can be used for printing if needed.
k = 0;
for (int i = 0; i < n; i++) {
for (int j = 0; j < 2; j++)
C_contiguous_embedding[k++] = embeddings_h[j * n + i];
}
// Move transposed embeddings back to device, as trustworthiness requires C contiguous format
MLCommon::updateDevice(Y_d.data(), C_contiguous_embedding, n * 2,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
// Test trustworthiness
score_exact =
trustworthiness_score<float,
ML::Distance::DistanceType::EucUnexpandedL2Sqrt>(
X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(),
handle.get_stream());
// Free space
free(embeddings_h);
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
int n = 1797;
int p = 64;
double score_bh;
double score_exact;
};
typedef TSNETest TSNETestF;
TEST_F(TSNETestF, Result) {
if (score_bh < 0.98) CUML_LOG_DEBUG("BH score = %f", score_bh);
if (score_exact < 0.98) CUML_LOG_DEBUG("Exact score = %f", score_exact);
ASSERT_TRUE(0.98 < score_bh && 0.98 < score_exact);
}
|
3db1b05d10a5a91fd5d8515f95c0ff766c237ee6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <thread>
#include <chrono>
#include <atomic>
using namespace std;
int n;
int *a = NULL;
int *b = NULL;
int *c = NULL;
atomic<int> cnt(0);
atomic<bool> run(false); // written by main and polled by the slave threads; atomic avoids a data race
__global__ void kernel(int* a, int* b, int*c, int l, int r){
int i = blockIdx.x*blockDim.x+threadIdx.x;
if(l <= i && i < r)
c[i] = a[i] + b[i];
}
void slave(int id){
hipSetDevice(id);
hipSetDeviceFlags(hipDeviceMapHost);
cnt++;
while(!run) this_thread::sleep_for(chrono::milliseconds(20));
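// Note: the launch below over-provisions threads ((n/3 + 31) blocks of 32); the
// [l, r) range check inside the kernel keeps each device to its own third of the vector.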
hipLaunchKernelGGL(( kernel), dim3((n/3+31)), dim3(32), 0, 0, a, b, c, n/3*id, n/3*(id+1));
hipDeviceSynchronize();
cnt++;
}
int main(){
ifstream in("input.txt");
ofstream out("output.txt");
thread slave0(slave, 0);
thread slave1(slave, 1);
while(cnt != 2) this_thread::sleep_for(chrono::milliseconds(20));
in >> n;
hipHostMalloc(&a, n*sizeof(int), hipHostMallocMapped);
hipHostMalloc(&b, n*sizeof(int), hipHostMallocMapped);
hipHostMalloc(&c, n*sizeof(int), hipHostMallocMapped);
for(int i = 0; i < n ; i++) in >> a[i];
for(int i = 0; i < n ; i++) in >> b[i];
run = true;
for(int i = n/3*2; i < n; i++)
c[i] = a[i] + b[i];
if(slave0.joinable())
slave0.join();
if(slave1.joinable())
slave1.join();
for(int i = 0; i < n; i++)
out << c[i] << ' ';
return 0;
}
|
3db1b05d10a5a91fd5d8515f95c0ff766c237ee6.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
#include <thread>
#include <chrono>
#include <atomic>
using namespace std;
int n;
int *a = NULL;
int *b = NULL;
int *c = NULL;
atomic<int> cnt(0);
atomic<bool> run(false); // written by main and polled by the slave threads; atomic avoids a data race
__global__ void kernel(int* a, int* b, int*c, int l, int r){
int i = blockIdx.x*blockDim.x+threadIdx.x;
if(l <= i && i < r)
c[i] = a[i] + b[i];
}
void slave(int id){
cudaSetDevice(id);
cudaSetDeviceFlags(cudaDeviceMapHost);
cnt++;
while(!run) this_thread::sleep_for(chrono::milliseconds(20));
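// Note: the launch below over-provisions threads ((n/3 + 31) blocks of 32); the
// [l, r) range check inside the kernel keeps each device to its own third of the vector.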
kernel<<<(n/3+31), 32>>>(a, b, c, n/3*id, n/3*(id+1));
cudaDeviceSynchronize();
cnt++;
}
int main(){
ifstream in("input.txt");
ofstream out("output.txt");
thread slave0(slave, 0);
thread slave1(slave, 1);
while(cnt != 2) this_thread::sleep_for(chrono::milliseconds(20));
in >> n;
cudaHostAlloc(&a, n*sizeof(int), cudaHostAllocMapped);
cudaHostAlloc(&b, n*sizeof(int), cudaHostAllocMapped);
cudaHostAlloc(&c, n*sizeof(int), cudaHostAllocMapped);
for(int i = 0; i < n ; i++) in >> a[i];
for(int i = 0; i < n ; i++) in >> b[i];
run = true;
for(int i = n/3*2; i < n; i++)
c[i] = a[i] + b[i];
if(slave0.joinable())
slave0.join();
if(slave1.joinable())
slave1.join();
for(int i = 0; i < n; i++)
out << c[i] << ' ';
return 0;
}
|
d6a006a2430c1ad813d0cfd76aff6f8f118843a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void finishCentroids_64(int* centroidMass, unsigned int* centroidCount, float* centroids) {
int centroidNumber = blockIdx.y * blockDim.y + threadIdx.y;
int dimensionNumber = blockIdx.x * blockDim.x + threadIdx.x;
if ((centroidNumber < 64) && (dimensionNumber < 34)) {
float totalCount = (float)centroidCount[centroidNumber];
float mass = (float)centroidMass[dimensionNumber * 64 + centroidNumber];
centroids[dimensionNumber * 64 + centroidNumber] = mass / ((float)INTCONFACTOR * totalCount);
}
}
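/*
Illustrative launch sketch (not part of the original file): the kernel covers
64 centroids x 34 dimensions, indexed as (x = dimension, y = centroid), so one
plausible configuration is a 32x8 block with the grid rounded up to cover (34, 64).
The buffer names below are hypothetical; INTCONFACTOR is assumed to come from includes.h.
dim3 block(32, 8);
dim3 grid((34 + block.x - 1) / block.x, (64 + block.y - 1) / block.y);
hipLaunchKernelGGL(finishCentroids_64, grid, block, 0, 0,
d_centroidMass, d_centroidCount, d_centroids);
*/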
|
d6a006a2430c1ad813d0cfd76aff6f8f118843a7.cu
|
#include "includes.h"
__global__ void finishCentroids_64(int* centroidMass, unsigned int* centroidCount, float* centroids) {
int centroidNumber = blockIdx.y * blockDim.y + threadIdx.y;
int dimensionNumber = blockIdx.x * blockDim.x + threadIdx.x;
if ((centroidNumber < 64) && (dimensionNumber < 34)) {
float totalCount = (float)centroidCount[centroidNumber];
float mass = (float)centroidMass[dimensionNumber * 64 + centroidNumber];
centroids[dimensionNumber * 64 + centroidNumber] = mass / ((float)INTCONFACTOR * totalCount);
}
}
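/*
Illustrative launch sketch (not part of the original file): the kernel covers
64 centroids x 34 dimensions, indexed as (x = dimension, y = centroid), so one
plausible configuration is a 32x8 block with the grid rounded up to cover (34, 64).
The buffer names below are hypothetical; INTCONFACTOR is assumed to come from includes.h.
dim3 block(32, 8);
dim3 grid((34 + block.x - 1) / block.x, (64 + block.y - 1) / block.y);
finishCentroids_64<<<grid, block>>>(d_centroidMass, d_centroidCount, d_centroids);
*/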
|
c0394f5e37f3b6010ec3a74b37f21c38ea7584a6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cupti.h>
#define METRIC_NAME_TESLA "branch_efficiency"
#define METRIC_NAME_FERMI "ipc"
#define DRIVER_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
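// Example: with ALIGN_SIZE 8, a raw pointer ending in 0x...3 is advanced by 5 bytes
// to the next 8-byte boundary (0x...8); an already-aligned pointer is returned unchanged.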
static const char *
getUvmCounterKindString(CUpti_ActivityUnifiedMemoryCounterKind kind)
{
switch (kind)
{
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD:
return "BYTES_TRANSFER_HTOD";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH:
return "BYTES_TRANSFER_DTOH";
default:
break;
}
return "<unknown>";
}
static void
printActivity(CUpti_Activity *record)
{
switch (record->kind)
{
case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER:
{
CUpti_ActivityUnifiedMemoryCounter2 *uvm = (CUpti_ActivityUnifiedMemoryCounter2 *)record;
printf("UNIFIED_MEMORY_COUNTER [ %llu %llu ] kind=%s value=%llu src %u dst %u\n",
(unsigned long long)(uvm->start),
(unsigned long long)(uvm->end),
getUvmCounterKindString(uvm->counterKind),
(unsigned long long)uvm->value,
uvm->srcId,
uvm->dstId);
break;
}
case CUPTI_ACTIVITY_KIND_MEMCPY:
{
CUpti_ActivityMemcpy *uvm = (CUpti_ActivityMemcpy *) record;
printf( "MEMORY_CPY [ ID %d/%d/%d :: %llu (ms) :: %llu (bytes) :: %d/%d/%d (Kind)]\n",
(int) uvm->contextId, (int) uvm->correlationId, (int) uvm->deviceId,
(unsigned long long) ((uvm->end-uvm->start)/1e6),
(unsigned long long) (uvm->bytes),
uvm->copyKind, uvm->srcKind, uvm->dstKind);
break;
}
case CUPTI_ACTIVITY_KIND_MEMSET:
{
CUpti_ActivityMemset *uvm = (CUpti_ActivityMemset *) record;
printf( "MEMORY_SET [ ID %d/%d/%d :: %llu (ms) :: %llu (bytes) :: %d (Kind) :: %d (value) ]\n",
(int) uvm->contextId, (int) uvm->correlationId, (int) uvm->deviceId,
(unsigned long long) ((uvm->end-uvm->start)/1e6),
(unsigned long long) (uvm->bytes),
uvm->memoryKind,
uvm->value);
break;
}
case CUPTI_ACTIVITY_KIND_KERNEL:
case CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL:
{
CUpti_ActivityKernel3 *uvm = (CUpti_ActivityKernel3 *) record;
printf( "KERNEL_RUN [ ID %d/%d/%d :: %llu (ms) :: %d/%d/%d (block) :: %d (gridID) :: \
%d (RegPerThd) :: %d/%d (SharedMem) :: %d/%d (LocMem) ]\n",
(int) uvm->contextId, (int) uvm->correlationId, (int) uvm->deviceId,
(unsigned long long) ((uvm->end-uvm->start)/1e6),
(int) uvm->blockX, (int) uvm->blockY, (int) uvm->blockZ,
(int) uvm->gridId, (int) uvm->registersPerThread,
(int) uvm->dynamicSharedMemory, (int) uvm->staticSharedMemory,
(int) uvm->localMemoryPerThread, (int) uvm->localMemoryTotal
);
break;
}
default:
printf(" <unknown>\n");
break;
}
}
// User data for event collection callback
typedef struct MetricData_st {
// the device where metric is being collected
hipDevice_t device;
// the set of event groups to collect for a pass
CUpti_EventGroupSet *eventGroups;
// the current number of events collected in eventIdArray and
// eventValueArray
uint32_t eventIdx;
// the number of entries in eventIdArray and eventValueArray
uint32_t numEvents;
// array of event ids
CUpti_EventID *eventIdArray;
// array of event values
uint64_t *eventValueArray;
} MetricData_t;
static uint64_t kernelDuration;
#define TILE_DIM 64
#define BLOCK_ROWS 8
#define NUM_REPS 100
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
__shared__ float tile[TILE_DIM];
tile[threadIdx.x] = A[threadIdx.x];
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
static void
initVec(float *vec, int n)
{
for (int i=0; i< n; i++)
vec[i] = i;
}
void CUPTIAPI
getMetricValueCallback(void *userdata, CUpti_CallbackDomain domain,
CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo)
{
MetricData_t *metricData = (MetricData_t*)userdata;
unsigned int i, j, k;
// This callback is enabled only for launch so we shouldn't see
// anything else.
if (cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) {
printf("%s:%d: unexpected cbid %d\n", __FILE__, __LINE__, cbid);
exit(-1);
}
// on entry, enable all the event groups being collected this pass,
// for metrics we collect for all instances of the event
if (cbInfo->callbackSite == CUPTI_API_ENTER) {
hipDeviceSynchronize();
CUPTI_CALL(cuptiSetEventCollectionMode(cbInfo->context,
CUPTI_EVENT_COLLECTION_MODE_KERNEL));
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
uint32_t all = 1;
CUPTI_CALL(cuptiEventGroupSetAttribute(metricData->eventGroups->eventGroups[i],
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES,
sizeof(all), &all));
CUPTI_CALL(cuptiEventGroupEnable(metricData->eventGroups->eventGroups[i]));
}
}
// on exit, read and record event values
if (cbInfo->callbackSite == CUPTI_API_EXIT) {
hipDeviceSynchronize();
// for each group, read the event values from the group and record
// in metricData
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
CUpti_EventGroup group = metricData->eventGroups->eventGroups[i];
CUpti_EventDomainID groupDomain;
uint32_t numEvents, numInstances, numTotalInstances;
CUpti_EventID *eventIds;
size_t groupDomainSize = sizeof(groupDomain);
size_t numEventsSize = sizeof(numEvents);
size_t numInstancesSize = sizeof(numInstances);
size_t numTotalInstancesSize = sizeof(numTotalInstances);
uint64_t *values, normalized, sum;
size_t valuesSize, eventIdsSize;
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID,
&groupDomainSize, &groupDomain));
CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(metricData->device, groupDomain,
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT,
&numTotalInstancesSize, &numTotalInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT,
&numInstancesSize, &numInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&numEventsSize, &numEvents));
eventIdsSize = numEvents * sizeof(CUpti_EventID);
eventIds = (CUpti_EventID *)malloc(eventIdsSize);
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENTS,
&eventIdsSize, eventIds));
valuesSize = sizeof(uint64_t) * numInstances;
values = (uint64_t *)malloc(valuesSize);
for (j = 0; j < numEvents; j++) {
CUPTI_CALL(cuptiEventGroupReadEvent(group, CUPTI_EVENT_READ_FLAG_NONE,
eventIds[j], &valuesSize, values));
if (metricData->eventIdx >= metricData->numEvents) {
fprintf(stderr, "error: too many events collected, metric expects only %d\n",
(int)metricData->numEvents);
exit(-1);
}
// sum collect event values from all instances
sum = 0;
for (k = 0; k < numInstances; k++)
sum += values[k];
// normalize the event value to represent the total number of
// domain instances on the device
normalized = (sum * numTotalInstances) / numInstances;
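// Worked example: if two profiled instances report 40 and 60 events (sum = 100)
// on a device with 8 domain instances, the normalized value is (100 * 8) / 2 = 400.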
metricData->eventIdArray[metricData->eventIdx] = eventIds[j];
metricData->eventValueArray[metricData->eventIdx] = normalized;
metricData->eventIdx++;
// print collected value
{
char eventName[128];
size_t eventNameSize = sizeof(eventName) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eventIds[j], CUPTI_EVENT_ATTR_NAME,
&eventNameSize, eventName));
eventName[127] = '\0';
printf("\t%s = %llu (", eventName, (unsigned long long)sum);
if (numInstances > 1) {
for (k = 0; k < numInstances; k++) {
if (k != 0)
printf(", ");
printf("%llu", (unsigned long long)values[k]);
}
}
printf(")\n");
printf("\t%s (normalized) (%llu * %u) / %u = %llu\n",
eventName, (unsigned long long)sum,
numTotalInstances, numInstances,
(unsigned long long)normalized);
}
}
free(values);
}
for (i = 0; i < metricData->eventGroups->numEventGroups; i++)
CUPTI_CALL(cuptiEventGroupDisable(metricData->eventGroups->eventGroups[i]));
}
}
static void
cleanUp(float *h_A, float *h_B, float *h_C, float *d_A, float *d_B, float *d_C)
{
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
static void
runPass()
{
int N = 10240 * 10240;
size_t size = N * sizeof(float);
int threadsPerBlock = 0;
int blocksPerGrid = 0;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
int i, sum;
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
hipMemset(d_A, 0, size);
hipMemset(d_B, 0, size);
// Invoke kernel
threadsPerBlock = 64;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("Launching kernel: blocks %d, thread/block %d\n",
blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// dim3 dimGrid(N/TILE_DIM, N/TILE_DIM, 1);
// dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
//
// transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_C, d_A);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Verify result
// for (i = 0; i < N; ++i) {
// sum = h_A[i] + h_B[i];
// if (h_C[i] != sum) {
// fprintf(stderr, "error: result verification failed\n");
// exit(-1);
// }
// }
cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
}
static void CUPTIAPI
bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords)
{
uint8_t *rawBuffer;
*size = 16 * 1024;
rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE);
*buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE);
*maxNumRecords = 0;
if (*buffer == NULL) {
printf("Error: out of memory\n");
exit(-1);
}
}
static void CUPTIAPI
bufferCompleted(hipCtx_t ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize)
{
CUptiResult status;
CUpti_Activity *record = NULL;
// CUpti_ActivityKernel3 *kernel;
//
// //since we launched only 1 kernel, we should have only 1 kernel record
// CUPTI_CALL(cuptiActivityGetNextRecord(buffer, validSize, &record));
//
// kernel = (CUpti_ActivityKernel3 *)record;
// if (kernel->kind != CUPTI_ACTIVITY_KIND_KERNEL) {
// fprintf(stderr, "Error: expected kernel activity record, got %d\n", (int)kernel->kind);
// exit(-1);
// }
//
// kernelDuration = kernel->end - kernel->start;
// free(buffer);
do {
status = cuptiActivityGetNextRecord(buffer, validSize, &record);
if (status == CUPTI_SUCCESS) {
printActivity(record);
}
else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
break;
}
else {
CUPTI_CALL(status);
}
} while (1);
// report any records dropped from the queue
size_t dropped;
CUPTI_CALL(cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped));
if (dropped != 0) {
printf("Dropped %u activity records\n", (unsigned int)dropped);
}
free(buffer);
}
int
main(int argc, char *argv[])
{
CUpti_SubscriberHandle subscriber;
hipCtx_t context = 0;
hipDevice_t device = 0;
int computeCapabilityMajor=0;
int computeCapabilityMinor=0;
int deviceNum;
int deviceCount;
char deviceName[32];
const char *metricName;
CUpti_MetricID metricId;
CUpti_EventGroupSets *passData;
MetricData_t metricData;
unsigned int pass;
CUpti_MetricValue metricValue;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
// make sure activity is enabled before any CUDA API
CUPTI_CALL( cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY) );
CUPTI_CALL( cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMSET) );
CUPTI_CALL( cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL) );
DRIVER_API_CALL(hipInit(0));
DRIVER_API_CALL(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
if (argc > 1)
deviceNum = atoi(argv[1]);
else
deviceNum = 0;
printf("CUDA Device Number: %d\n", deviceNum);
DRIVER_API_CALL(hipDeviceGet(&device, deviceNum));
DRIVER_API_CALL(hipDeviceGetName(deviceName, 32, device));
printf("CUDA Device Name: %s\n", deviceName);
DRIVER_API_CALL(hipDeviceComputeCapability(&computeCapabilityMajor,
&computeCapabilityMinor,
device));
DRIVER_API_CALL(hipCtxCreate(&context, 0, device));
// Get the name of the metric to collect
if (argc > 2)
metricName = argv[2];
else {
if (computeCapabilityMajor > 1) {
metricName = METRIC_NAME_FERMI;
}
else {
metricName = METRIC_NAME_TESLA;
}
}
// need to collect duration of kernel execution without any event
// collection enabled (some metrics need kernel duration as part of
// calculation). The only accurate way to do this is by using the
// activity API.
// events for timing
hipEvent_t startEvent, stopEvent;
RUNTIME_API_CALL( hipEventCreate(&startEvent) );
RUNTIME_API_CALL( hipEventCreate(&stopEvent) );
float ms;
CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted));
RUNTIME_API_CALL( hipEventRecord(startEvent, 0) );
runPass();
RUNTIME_API_CALL( hipEventRecord(stopEvent, 0) );
RUNTIME_API_CALL( hipEventSynchronize(stopEvent) );
RUNTIME_API_CALL( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("| time %f\n", ms);
hipDeviceSynchronize();
CUPTI_CALL(cuptiActivityFlushAll(0));
// setup launch callback for event collection
CUPTI_CALL(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)getMetricValueCallback, &metricData));
CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020));
// allocate space to hold all the events needed for the metric
CUPTI_CALL(cuptiMetricGetIdFromName(device, metricName, &metricId));
CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &metricData.numEvents));
metricData.device = device;
metricData.eventIdArray = (CUpti_EventID *)malloc(metricData.numEvents * sizeof(CUpti_EventID));
metricData.eventValueArray = (uint64_t *)malloc(metricData.numEvents * sizeof(uint64_t));
metricData.eventIdx = 0;
// get the number of passes required to collect all the events
// needed for the metric and the event groups for each pass
CUPTI_CALL(cuptiMetricCreateEventGroupSets(context, sizeof(metricId), &metricId, &passData));
for (pass = 0; pass < passData->numSets; pass++) {
printf("Pass %u\n", pass);
metricData.eventGroups = passData->sets + pass;
runPass();
}
if (metricData.eventIdx != metricData.numEvents) {
fprintf(stderr, "error: expected %u metric events, got %u\n",
metricData.numEvents, metricData.eventIdx);
exit(-1);
}
// use all the collected events to calculate the metric value
CUPTI_CALL(cuptiMetricGetValue(device, metricId,
metricData.numEvents * sizeof(CUpti_EventID),
metricData.eventIdArray,
metricData.numEvents * sizeof(uint64_t),
metricData.eventValueArray,
kernelDuration, &metricValue));
// print metric value, we format based on the value kind
{
CUpti_MetricValueKind valueKind;
size_t valueKindSize = sizeof(valueKind);
CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND,
&valueKindSize, &valueKind));
switch (valueKind) {
case CUPTI_METRIC_VALUE_KIND_DOUBLE:
printf("Metric %s = %f\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_UINT64:
printf("Metric %s = %llu\n", metricName,
(unsigned long long)metricValue.metricValueUint64);
break;
case CUPTI_METRIC_VALUE_KIND_INT64:
printf("Metric %s = %lld\n", metricName,
(long long)metricValue.metricValueInt64);
break;
case CUPTI_METRIC_VALUE_KIND_PERCENT:
printf("Metric %s = %f%%\n", metricName, metricValue.metricValuePercent);
break;
case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
printf("Metric %s = %llu bytes/sec\n", metricName,
(unsigned long long)metricValue.metricValueThroughput);
break;
case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
printf("Metric %s = utilization level %u\n", metricName,
(unsigned int)metricValue.metricValueUtilizationLevel);
break;
default:
fprintf(stderr, "error: unknown value kind\n");
exit(-1);
}
}
CUPTI_CALL(cuptiUnsubscribe(subscriber));
return 0;
}
|
c0394f5e37f3b6010ec3a74b37f21c38ea7584a6.cu
|
/*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <cuda.h>
#include <cupti.h>
#define METRIC_NAME_TESLA "branch_efficiency"
#define METRIC_NAME_FERMI "ipc"
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
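// Example: with ALIGN_SIZE 8, a raw pointer ending in 0x...3 is advanced by 5 bytes
// to the next 8-byte boundary (0x...8); an already-aligned pointer is returned unchanged.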
static const char *
getUvmCounterKindString(CUpti_ActivityUnifiedMemoryCounterKind kind)
{
switch (kind)
{
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD:
return "BYTES_TRANSFER_HTOD";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH:
return "BYTES_TRANSFER_DTOH";
default:
break;
}
return "<unknown>";
}
static void
printActivity(CUpti_Activity *record)
{
switch (record->kind)
{
case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER:
{
CUpti_ActivityUnifiedMemoryCounter2 *uvm = (CUpti_ActivityUnifiedMemoryCounter2 *)record;
printf("UNIFIED_MEMORY_COUNTER [ %llu %llu ] kind=%s value=%llu src %u dst %u\n",
(unsigned long long)(uvm->start),
(unsigned long long)(uvm->end),
getUvmCounterKindString(uvm->counterKind),
(unsigned long long)uvm->value,
uvm->srcId,
uvm->dstId);
break;
}
case CUPTI_ACTIVITY_KIND_MEMCPY:
{
CUpti_ActivityMemcpy *uvm = (CUpti_ActivityMemcpy *) record;
printf( "MEMORY_CPY [ ID %d/%d/%d :: %llu (ms) :: %llu (bytes) :: %d/%d/%d (Kind)]\n",
(int) uvm->contextId, (int) uvm->correlationId, (int) uvm->deviceId,
(unsigned long long) ((uvm->end-uvm->start)/1e6),
(unsigned long long) (uvm->bytes),
uvm->copyKind, uvm->srcKind, uvm->dstKind);
break;
}
case CUPTI_ACTIVITY_KIND_MEMSET:
{
CUpti_ActivityMemset *uvm = (CUpti_ActivityMemset *) record;
printf( "MEMORY_SET [ ID %d/%d/%d :: %llu (ms) :: %llu (bytes) :: %d (Kind) :: %d (value) ]\n",
(int) uvm->contextId, (int) uvm->correlationId, (int) uvm->deviceId,
(unsigned long long) ((uvm->end-uvm->start)/1e6),
(unsigned long long) (uvm->bytes),
uvm->memoryKind,
uvm->value);
break;
}
case CUPTI_ACTIVITY_KIND_KERNEL:
case CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL:
{
CUpti_ActivityKernel3 *uvm = (CUpti_ActivityKernel3 *) record;
printf( "KERNEL_RUN [ ID %d/%d/%d :: %llu (ms) :: %d/%d/%d (block) :: %d (gridID) :: \
%d (RegPerThd) :: %d/%d (SharedMem) :: %d/%d (LocMem) ]\n",
(int) uvm->contextId, (int) uvm->correlationId, (int) uvm->deviceId,
(unsigned long long) ((uvm->end-uvm->start)/1e6),
(int) uvm->blockX, (int) uvm->blockY, (int) uvm->blockZ,
(int) uvm->gridId, (int) uvm->registersPerThread,
(int) uvm->dynamicSharedMemory, (int) uvm->staticSharedMemory,
(int) uvm->localMemoryPerThread, (int) uvm->localMemoryTotal
);
break;
}
default:
printf(" <unknown>\n");
break;
}
}
// User data for event collection callback
typedef struct MetricData_st {
// the device where metric is being collected
CUdevice device;
// the set of event groups to collect for a pass
CUpti_EventGroupSet *eventGroups;
// the current number of events collected in eventIdArray and
// eventValueArray
uint32_t eventIdx;
// the number of entries in eventIdArray and eventValueArray
uint32_t numEvents;
// array of event ids
CUpti_EventID *eventIdArray;
// array of event values
uint64_t *eventValueArray;
} MetricData_t;
static uint64_t kernelDuration;
#define TILE_DIM 64
#define BLOCK_ROWS 8
#define NUM_REPS 100
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
__shared__ float tile[TILE_DIM];
tile[threadIdx.x] = A[threadIdx.x];
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
static void
initVec(float *vec, int n)
{
for (int i=0; i< n; i++)
vec[i] = i;
}
void CUPTIAPI
getMetricValueCallback(void *userdata, CUpti_CallbackDomain domain,
CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo)
{
MetricData_t *metricData = (MetricData_t*)userdata;
unsigned int i, j, k;
// This callback is enabled only for launch so we shouldn't see
// anything else.
if (cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) {
printf("%s:%d: unexpected cbid %d\n", __FILE__, __LINE__, cbid);
exit(-1);
}
// on entry, enable all the event groups being collected this pass,
// for metrics we collect for all instances of the event
if (cbInfo->callbackSite == CUPTI_API_ENTER) {
cudaDeviceSynchronize();
CUPTI_CALL(cuptiSetEventCollectionMode(cbInfo->context,
CUPTI_EVENT_COLLECTION_MODE_KERNEL));
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
uint32_t all = 1;
CUPTI_CALL(cuptiEventGroupSetAttribute(metricData->eventGroups->eventGroups[i],
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES,
sizeof(all), &all));
CUPTI_CALL(cuptiEventGroupEnable(metricData->eventGroups->eventGroups[i]));
}
}
// on exit, read and record event values
if (cbInfo->callbackSite == CUPTI_API_EXIT) {
cudaDeviceSynchronize();
// for each group, read the event values from the group and record
// in metricData
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
CUpti_EventGroup group = metricData->eventGroups->eventGroups[i];
CUpti_EventDomainID groupDomain;
uint32_t numEvents, numInstances, numTotalInstances;
CUpti_EventID *eventIds;
size_t groupDomainSize = sizeof(groupDomain);
size_t numEventsSize = sizeof(numEvents);
size_t numInstancesSize = sizeof(numInstances);
size_t numTotalInstancesSize = sizeof(numTotalInstances);
uint64_t *values, normalized, sum;
size_t valuesSize, eventIdsSize;
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID,
&groupDomainSize, &groupDomain));
CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(metricData->device, groupDomain,
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT,
&numTotalInstancesSize, &numTotalInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT,
&numInstancesSize, &numInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&numEventsSize, &numEvents));
eventIdsSize = numEvents * sizeof(CUpti_EventID);
eventIds = (CUpti_EventID *)malloc(eventIdsSize);
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENTS,
&eventIdsSize, eventIds));
valuesSize = sizeof(uint64_t) * numInstances;
values = (uint64_t *)malloc(valuesSize);
for (j = 0; j < numEvents; j++) {
CUPTI_CALL(cuptiEventGroupReadEvent(group, CUPTI_EVENT_READ_FLAG_NONE,
eventIds[j], &valuesSize, values));
if (metricData->eventIdx >= metricData->numEvents) {
fprintf(stderr, "error: too many events collected, metric expects only %d\n",
(int)metricData->numEvents);
exit(-1);
}
// sum collect event values from all instances
sum = 0;
for (k = 0; k < numInstances; k++)
sum += values[k];
// normalize the event value to represent the total number of
// domain instances on the device
normalized = (sum * numTotalInstances) / numInstances;
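// Worked example: if two profiled instances report 40 and 60 events (sum = 100)
// on a device with 8 domain instances, the normalized value is (100 * 8) / 2 = 400.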
metricData->eventIdArray[metricData->eventIdx] = eventIds[j];
metricData->eventValueArray[metricData->eventIdx] = normalized;
metricData->eventIdx++;
// print collected value
{
char eventName[128];
size_t eventNameSize = sizeof(eventName) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eventIds[j], CUPTI_EVENT_ATTR_NAME,
&eventNameSize, eventName));
eventName[127] = '\0';
printf("\t%s = %llu (", eventName, (unsigned long long)sum);
if (numInstances > 1) {
for (k = 0; k < numInstances; k++) {
if (k != 0)
printf(", ");
printf("%llu", (unsigned long long)values[k]);
}
}
printf(")\n");
printf("\t%s (normalized) (%llu * %u) / %u = %llu\n",
eventName, (unsigned long long)sum,
numTotalInstances, numInstances,
(unsigned long long)normalized);
}
}
free(values);
}
for (i = 0; i < metricData->eventGroups->numEventGroups; i++)
CUPTI_CALL(cuptiEventGroupDisable(metricData->eventGroups->eventGroups[i]));
}
}
static void
cleanUp(float *h_A, float *h_B, float *h_C, float *d_A, float *d_B, float *d_C)
{
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
static void
runPass()
{
int N = 10240 * 10240;
size_t size = N * sizeof(float);
int threadsPerBlock = 0;
int blocksPerGrid = 0;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
int i, sum;
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
cudaMemset(d_A, 0, size);
cudaMemset(d_B, 0, size);
// Invoke kernel
threadsPerBlock = 64;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("Launching kernel: blocks %d, thread/block %d\n",
blocksPerGrid, threadsPerBlock);
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// dim3 dimGrid(N/TILE_DIM, N/TILE_DIM, 1);
// dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
//
// transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_C, d_A);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Verify result
// for (i = 0; i < N; ++i) {
// sum = h_A[i] + h_B[i];
// if (h_C[i] != sum) {
// fprintf(stderr, "error: result verification failed\n");
// exit(-1);
// }
// }
cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
}
static void CUPTIAPI
bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords)
{
uint8_t *rawBuffer;
*size = 16 * 1024;
rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE);
*buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE);
*maxNumRecords = 0;
if (*buffer == NULL) {
printf("Error: out of memory\n");
exit(-1);
}
}
static void CUPTIAPI
bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize)
{
CUptiResult status;
CUpti_Activity *record = NULL;
// CUpti_ActivityKernel3 *kernel;
//
// //since we launched only 1 kernel, we should have only 1 kernel record
// CUPTI_CALL(cuptiActivityGetNextRecord(buffer, validSize, &record));
//
// kernel = (CUpti_ActivityKernel3 *)record;
// if (kernel->kind != CUPTI_ACTIVITY_KIND_KERNEL) {
// fprintf(stderr, "Error: expected kernel activity record, got %d\n", (int)kernel->kind);
// exit(-1);
// }
//
// kernelDuration = kernel->end - kernel->start;
// free(buffer);
do {
status = cuptiActivityGetNextRecord(buffer, validSize, &record);
if (status == CUPTI_SUCCESS) {
printActivity(record);
}
else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
break;
}
else {
CUPTI_CALL(status);
}
} while (1);
// report any records dropped from the queue
size_t dropped;
CUPTI_CALL(cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped));
if (dropped != 0) {
printf("Dropped %u activity records\n", (unsigned int)dropped);
}
free(buffer);
}
int
main(int argc, char *argv[])
{
CUpti_SubscriberHandle subscriber;
CUcontext context = 0;
CUdevice device = 0;
int computeCapabilityMajor=0;
int computeCapabilityMinor=0;
int deviceNum;
int deviceCount;
char deviceName[32];
const char *metricName;
CUpti_MetricID metricId;
CUpti_EventGroupSets *passData;
MetricData_t metricData;
unsigned int pass;
CUpti_MetricValue metricValue;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
// make sure activity is enabled before any CUDA API
CUPTI_CALL( cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY) );
CUPTI_CALL( cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMSET) );
CUPTI_CALL( cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL) );
DRIVER_API_CALL(cuInit(0));
DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
if (argc > 1)
deviceNum = atoi(argv[1]);
else
deviceNum = 0;
printf("CUDA Device Number: %d\n", deviceNum);
DRIVER_API_CALL(cuDeviceGet(&device, deviceNum));
DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device));
printf("CUDA Device Name: %s\n", deviceName);
DRIVER_API_CALL(cuDeviceComputeCapability(&computeCapabilityMajor,
&computeCapabilityMinor,
device));
DRIVER_API_CALL(cuCtxCreate(&context, 0, device));
// Get the name of the metric to collect
if (argc > 2)
metricName = argv[2];
else {
if (computeCapabilityMajor > 1) {
metricName = METRIC_NAME_FERMI;
}
else {
metricName = METRIC_NAME_TESLA;
}
}
// need to collect duration of kernel execution without any event
// collection enabled (some metrics need kernel duration as part of
// calculation). The only accurate way to do this is by using the
// activity API.
// events for timing
cudaEvent_t startEvent, stopEvent;
RUNTIME_API_CALL( cudaEventCreate(&startEvent) );
RUNTIME_API_CALL( cudaEventCreate(&stopEvent) );
float ms;
CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted));
RUNTIME_API_CALL( cudaEventRecord(startEvent, 0) );
runPass();
RUNTIME_API_CALL( cudaEventRecord(stopEvent, 0) );
RUNTIME_API_CALL( cudaEventSynchronize(stopEvent) );
RUNTIME_API_CALL( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("| time %f\n", ms);
cudaDeviceSynchronize();
CUPTI_CALL(cuptiActivityFlushAll(0));
// setup launch callback for event collection
CUPTI_CALL(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)getMetricValueCallback, &metricData));
CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020));
// allocate space to hold all the events needed for the metric
CUPTI_CALL(cuptiMetricGetIdFromName(device, metricName, &metricId));
CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &metricData.numEvents));
metricData.device = device;
metricData.eventIdArray = (CUpti_EventID *)malloc(metricData.numEvents * sizeof(CUpti_EventID));
metricData.eventValueArray = (uint64_t *)malloc(metricData.numEvents * sizeof(uint64_t));
metricData.eventIdx = 0;
// get the number of passes required to collect all the events
// needed for the metric and the event groups for each pass
CUPTI_CALL(cuptiMetricCreateEventGroupSets(context, sizeof(metricId), &metricId, &passData));
for (pass = 0; pass < passData->numSets; pass++) {
printf("Pass %u\n", pass);
metricData.eventGroups = passData->sets + pass;
runPass();
}
if (metricData.eventIdx != metricData.numEvents) {
fprintf(stderr, "error: expected %u metric events, got %u\n",
metricData.numEvents, metricData.eventIdx);
exit(-1);
}
// use all the collected events to calculate the metric value
CUPTI_CALL(cuptiMetricGetValue(device, metricId,
metricData.numEvents * sizeof(CUpti_EventID),
metricData.eventIdArray,
metricData.numEvents * sizeof(uint64_t),
metricData.eventValueArray,
kernelDuration, &metricValue));
// print metric value, we format based on the value kind
{
CUpti_MetricValueKind valueKind;
size_t valueKindSize = sizeof(valueKind);
CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND,
&valueKindSize, &valueKind));
switch (valueKind) {
case CUPTI_METRIC_VALUE_KIND_DOUBLE:
printf("Metric %s = %f\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_UINT64:
printf("Metric %s = %llu\n", metricName,
(unsigned long long)metricValue.metricValueUint64);
break;
case CUPTI_METRIC_VALUE_KIND_INT64:
printf("Metric %s = %lld\n", metricName,
(long long)metricValue.metricValueInt64);
break;
case CUPTI_METRIC_VALUE_KIND_PERCENT:
printf("Metric %s = %f%%\n", metricName, metricValue.metricValuePercent);
break;
case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
printf("Metric %s = %llu bytes/sec\n", metricName,
(unsigned long long)metricValue.metricValueThroughput);
break;
case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
printf("Metric %s = utilization level %u\n", metricName,
(unsigned int)metricValue.metricValueUtilizationLevel);
break;
default:
fprintf(stderr, "error: unknown value kind\n");
exit(-1);
}
}
CUPTI_CALL(cuptiUnsubscribe(subscriber));
return 0;
}
|
1045040ef402e08b83e1b8419544584ad6b43d4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
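// Reference: the q-Pochhammer symbol is (a; q)_n = prod_{k=0}^{n-1} (1 - a q^k).
// The loop above starts with Q = q and runs 79 iterations, so it accumulates
// prod_{k=1}^{79} (1 - a q^k), i.e. a truncation of (aq; q)_infinity rather than (a; q)_infinity.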
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
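// Derivation check: writing ag = R e^{i theta} and bg = c + d i,
// ag^bg = R^c e^{-d theta} * [cos(d ln R + c theta) + i sin(d ln R + c theta)],
// which is exactly mesp (the R^(c+di) factor) times frim (the e^{i theta (c+di)} factor).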
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265358979,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265358979,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
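// Note: thetta is a truncated Jacobi theta sum,
// theta(z, tau) = sum_{n = -inf}^{inf} exp(pi i tau n^2) exp(2 i n z),
// evaluated here over n in [-10, 10).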
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex alvir(hipComplex z)
{
hipComplex out(j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ hipComplex hexva(int m, hipComplex z)
{
hipComplex out(jnf(m,z.r),jnf(m,z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex ahilv(hipComplex z)
{
hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex aciwa(hipComplex z)
{
hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thy(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return; // grid is rounded up in kernelLauncher, so guard against writing past d_out
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(0.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex rhuva(3.0,0.0);
hipComplex rarva(8.0,0.0);
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<2;v++)
{
/* stripes all over the place */
/*cue =cue -aon*ahilv(hilva(cue))-uon*hilva(ahilv(cue))/(uon*ahilv(hilva(q))-aon*ai*hilva(ahilv(cue)));*/
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
cue =cue - hilva(cue)/(hilva(cue+aon*helva(cue))/ahilv(cue)-uon);
accume = accume + hilva(cue);
}
cue = accume;
cue=the3(cue,fixon*flat(cue));
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
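/*
Minimal host-side sketch (not part of the original file; kernel.h is not shown here)
of one way kernelLauncher could be driven. The buffer name and cursor position are assumptions.
uchar4 *d_out;
hipMalloc(&d_out, DIM * DIM * sizeof(uchar4));
int2 pos = make_int2(DIM / 2, DIM / 2);
kernelLauncher(d_out, DIM, DIM, pos);
hipDeviceSynchronize();
hipFree(d_out);
*/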
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
|
1045040ef402e08b83e1b8419544584ad6b43d4a.cu
|
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// Truncated q-Pochhammer product: out = prod_{k=1}^{79} (1 - a*q^k). Note that the k = 0 factor (1 - a) of (a;q)_inf is omitted, since Q starts at q.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// Truncated q-Pochhammer product with n terms requested: out = prod_{k=1}^{n-1} (1 - a*q^k); as above, the (1 - a) factor is omitted.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
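// Complex power via polar form: writing ag = R e^{i theta} and bg = c + d i, we have ag^bg = R^c * e^{-d*theta} * e^{i (c*theta + d*ln R)} (principal branch), which is what the two factors below assemble.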
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
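// Moebius transformation of the unit disc: z -> e^{i t} * (z - a) / (1 - conj(a)*z).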
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
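// Truncated theta-type series: sum over n = -20..19 of q^(n^2) * e^(i n z).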
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
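// Jacobi theta_3 as a truncated triple product: prod_{n=1}^{10} (1 - q^{2n}) (1 + 2 q^{2n-1} cos(2z) + q^{4n-2}).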
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
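// The helpers below apply real Bessel (and related) functions component-wise to z.r and z.i; they are artistic mixers rather than analytic complex Bessel functions.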
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex alvir(cuComplex z)
{
cuComplex out(j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ cuComplex hexva(int m, cuComplex z)
{
cuComplex out(jnf(m,z.r),jnf(m,z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex ahilv(cuComplex z)
{
cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex aciwa(cuComplex z)
{
cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thy(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(0.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex rhuva(3.0,0.0);
cuComplex rarva(8.0,0.0);
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<2;v++)
{
/* stripes all over the place */
/*cue =cue -aon*ahilv(hilva(cue))-uon*hilva(ahilv(cue))/(uon*ahilv(hilva(q))-aon*ai*hilva(ahilv(cue)));*/
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
cue =cue - hilva(cue)/(hilva(cue+aon*helva(cue))/ahilv(cue)-uon);
accume = accume + hilva(cue);
}
cue = accume;
cue=the3(cue,fixon*flat(cue));
if ((c >= w) || (r >= h)) return; // guard: the launch grid may overshoot the image when w or h is not a multiple of the block size
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
|
bfdde1b52e73e41cd78d582f3cc84af798fc5a18.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "kernel.h"
template <typename T_BBOX, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void decodeBBoxes_kernel(
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const T_BBOX* loc_data,
const T_BBOX* prior_data,
T_BBOX* bbox_data)
{
for (int index = blockIdx.x * nthds_per_cta + threadIdx.x;
index < nthreads;
index += nthds_per_cta * gridDim.x)
{
// Bounding box coordinate index {0, 1, 2, 3}
const int i = index % 4;
// Bounding box class index
const int c = (index / 4) % num_loc_classes;
// Prior box id corresponding to the bounding box
const int d = (index / 4 / num_loc_classes) % num_priors;
// If the bounding boxes are not shared among all the classes and this bounding box corresponds to the background class
if (!share_location && c == background_label_id)
{
// Ignore background class if not share_location.
return;
}
// Index to the right anchor box corresponding to the current bounding box
const int pi = d * 4;
// Index to the right variances corresponding to the current bounding box
const int vi = pi + num_priors * 4;
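// prior_data layout: the first num_priors*4 values are the prior box corners, followed by num_priors*4 variance values (hence the vi offset).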
// Encoding method: CodeTypeSSD::CORNER
//if (code_type == PriorBoxParameter_CodeType_CORNER){
if (code_type == CodeTypeSSD::CORNER)
{
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
// prior_data[pi + i]: prior box coordinates corresponding to the current bounding box coordinate
bbox_data[index] = prior_data[pi + i] + loc_data[index];
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
// prior_data[vi + i]: variance corresponding to the current bounding box coordinate
bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
//} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
}
// Encoding method: CodeTypeSSD::CENTER_SIZE
else if (code_type == CodeTypeSSD::CENTER_SIZE)
{
// Get prior box coordinates
const T_BBOX p_xmin = prior_data[pi];
const T_BBOX p_ymin = prior_data[pi + 1];
const T_BBOX p_xmax = prior_data[pi + 2];
const T_BBOX p_ymax = prior_data[pi + 3];
// Calculate prior box center, height, and width
const T_BBOX prior_width = p_xmax - p_xmin;
const T_BBOX prior_height = p_ymax - p_ymin;
const T_BBOX prior_center_x = (p_xmin + p_xmax) / 2.;
const T_BBOX prior_center_y = (p_ymin + p_ymax) / 2.;
// Get the current bounding box coordinates
const T_BBOX xmin = loc_data[index - i];
const T_BBOX ymin = loc_data[index - i + 1];
const T_BBOX xmax = loc_data[index - i + 2];
const T_BBOX ymax = loc_data[index - i + 3];
// Declare decoded bounding box coordinates
T_BBOX decode_bbox_center_x, decode_bbox_center_y;
T_BBOX decode_bbox_width, decode_bbox_height;
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width = exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height = exp(prior_data[vi + 3] * ymax) * prior_height;
}
// Use [x_topleft, y_topleft, x_bottomright, y_bottomright] as coordinates for final decoded bounding box output
switch (i)
{
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
//} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
}
// Encoding method: CodeTypeSSD::CORNER_SIZE
else if (code_type == CodeTypeSSD::CORNER_SIZE)
{
// Get prior box coordinates
const T_BBOX p_xmin = prior_data[pi];
const T_BBOX p_ymin = prior_data[pi + 1];
const T_BBOX p_xmax = prior_data[pi + 2];
const T_BBOX p_ymax = prior_data[pi + 3];
// Get prior box width and height
const T_BBOX prior_width = p_xmax - p_xmin;
const T_BBOX prior_height = p_ymax - p_ymin;
T_BBOX p_size;
if (i == 0 || i == 2)
{
p_size = prior_width;
}
else
{
p_size = prior_height;
}
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
}
// Exactly the same as CodeTypeSSD::CENTER_SIZE, but the variance is always used to adjust the bounding box decoding
else if (code_type == CodeTypeSSD::TF_CENTER)
{
const T_BBOX pXmin = prior_data[pi];
const T_BBOX pYmin = prior_data[pi + 1];
const T_BBOX pXmax = prior_data[pi + 2];
const T_BBOX pYmax = prior_data[pi + 3];
const T_BBOX priorWidth = pXmax - pXmin;
const T_BBOX priorHeight = pYmax - pYmin;
const T_BBOX priorCenterX = (pXmin + pXmax) / 2.;
const T_BBOX priorCenterY = (pYmin + pYmax) / 2.;
const T_BBOX ymin = loc_data[index - i];
const T_BBOX xmin = loc_data[index - i + 1];
const T_BBOX ymax = loc_data[index - i + 2];
const T_BBOX xmax = loc_data[index - i + 3];
T_BBOX bboxCenterX, bboxCenterY;
T_BBOX bboxWidth, bboxHeight;
bboxCenterX = prior_data[vi] * xmin * priorWidth + priorCenterX;
bboxCenterY = prior_data[vi + 1] * ymin * priorHeight + priorCenterY;
bboxWidth = exp(prior_data[vi + 2] * xmax) * priorWidth;
bboxHeight = exp(prior_data[vi + 3] * ymax) * priorHeight;
switch (i)
{
case 0:
bbox_data[index] = bboxCenterX - bboxWidth / 2.;
break;
case 1:
bbox_data[index] = bboxCenterY - bboxHeight / 2.;
break;
case 2:
bbox_data[index] = bboxCenterX + bboxWidth / 2.;
break;
case 3:
bbox_data[index] = bboxCenterY + bboxHeight / 2.;
break;
}
}
else
{
// Unknown code type.
assert("Unknown Box decode code type");
}
// Clip bounding box or not
if (clip_bbox)
{
bbox_data[index] = max(min(bbox_data[index], T_BBOX(1.)), T_BBOX(0.));
}
}
}
template <typename T_BBOX>
pluginStatus_t decodeBBoxes_gpu(
hipStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const void* loc_data,
const void* prior_data,
void* bbox_data)
{
const int BS = 512;
const int GS = (nthreads + BS - 1) / BS;
hipLaunchKernelGGL(( decodeBBoxes_kernel<T_BBOX, BS>), dim3(GS), dim3(BS), 0, stream, nthreads, code_type, variance_encoded_in_target,
num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox,
(const T_BBOX*) loc_data, (const T_BBOX*) prior_data,
(T_BBOX*) bbox_data);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// decodeBBoxes LAUNCH CONFIG
typedef pluginStatus_t (*dbbFunc)(hipStream_t,
const int,
const CodeTypeSSD,
const bool,
const int,
const bool,
const int,
const int,
const bool,
const void*,
const void*,
void*);
struct dbbLaunchConfig
{
DataType t_bbox;
dbbFunc function;
dbbLaunchConfig(DataType t_bbox)
: t_bbox(t_bbox)
{
}
dbbLaunchConfig(DataType t_bbox, dbbFunc function)
: t_bbox(t_bbox)
, function(function)
{
}
bool operator==(const dbbLaunchConfig& other)
{
return t_bbox == other.t_bbox;
}
};
static std::vector<dbbLaunchConfig> dbbFuncVec;
bool decodeBBoxesInit()
{
dbbFuncVec.push_back(dbbLaunchConfig(DataType::kFLOAT,
decodeBBoxes_gpu<float>));
return true;
}
static bool initialized = decodeBBoxesInit();
pluginStatus_t decodeBBoxes(
hipStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const DataType DT_BBOX,
const void* loc_data,
const void* prior_data,
void* bbox_data)
{
dbbLaunchConfig lc = dbbLaunchConfig(DT_BBOX);
for (unsigned i = 0; i < dbbFuncVec.size(); ++i)
{
if (lc == dbbFuncVec[i])
{
DEBUG_PRINTF("decodeBBox kernel %d\n", i);
return dbbFuncVec[i].function(stream,
nthreads,
code_type,
variance_encoded_in_target,
num_priors,
share_location,
num_loc_classes,
background_label_id,
clip_bbox,
loc_data,
prior_data,
bbox_data);
}
}
return STATUS_BAD_PARAM;
}
|
bfdde1b52e73e41cd78d582f3cc84af798fc5a18.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "kernel.h"
template <typename T_BBOX, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void decodeBBoxes_kernel(
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const T_BBOX* loc_data,
const T_BBOX* prior_data,
T_BBOX* bbox_data)
{
for (int index = blockIdx.x * nthds_per_cta + threadIdx.x;
index < nthreads;
index += nthds_per_cta * gridDim.x)
{
// Bounding box coordinate index {0, 1, 2, 3}
const int i = index % 4;
// Bounding box class index
const int c = (index / 4) % num_loc_classes;
// Prior box id corresponding to the bounding box
const int d = (index / 4 / num_loc_classes) % num_priors;
// If the bounding boxes are not shared among all the classes and this bounding box corresponds to the background class
if (!share_location && c == background_label_id)
{
// Ignore background class if not share_location.
return;
}
// Index to the right anchor box corresponding to the current bounding box
const int pi = d * 4;
// Index to the right variances corresponding to the current bounding box
const int vi = pi + num_priors * 4;
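// prior_data layout: the first num_priors*4 values are the prior box corners, followed by num_priors*4 variance values (hence the vi offset).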
// Encoding method: CodeTypeSSD::CORNER
//if (code_type == PriorBoxParameter_CodeType_CORNER){
if (code_type == CodeTypeSSD::CORNER)
{
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
// prior_data[pi + i]: prior box coordinates corresponding to the current bounding box coordinate
bbox_data[index] = prior_data[pi + i] + loc_data[index];
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
// prior_data[vi + i]: variance corresponding to the current bounding box coordinate
bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
//} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
}
// Encoding method: CodeTypeSSD::CENTER_SIZE
else if (code_type == CodeTypeSSD::CENTER_SIZE)
{
// Get prior box coordinates
const T_BBOX p_xmin = prior_data[pi];
const T_BBOX p_ymin = prior_data[pi + 1];
const T_BBOX p_xmax = prior_data[pi + 2];
const T_BBOX p_ymax = prior_data[pi + 3];
// Calculate prior box center, height, and width
const T_BBOX prior_width = p_xmax - p_xmin;
const T_BBOX prior_height = p_ymax - p_ymin;
const T_BBOX prior_center_x = (p_xmin + p_xmax) / 2.;
const T_BBOX prior_center_y = (p_ymin + p_ymax) / 2.;
// Get the current bounding box coordinates
const T_BBOX xmin = loc_data[index - i];
const T_BBOX ymin = loc_data[index - i + 1];
const T_BBOX xmax = loc_data[index - i + 2];
const T_BBOX ymax = loc_data[index - i + 3];
// Declare decoded bounding box coordinates
T_BBOX decode_bbox_center_x, decode_bbox_center_y;
T_BBOX decode_bbox_width, decode_bbox_height;
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width = exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height = exp(prior_data[vi + 3] * ymax) * prior_height;
}
// Use [x_topleft, y_topleft, x_bottomright, y_bottomright] as coordinates for final decoded bounding box output
switch (i)
{
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
//} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
}
// Encoding method: CodeTypeSSD::CORNER_SIZE
else if (code_type == CodeTypeSSD::CORNER_SIZE)
{
// Get prior box coordinates
const T_BBOX p_xmin = prior_data[pi];
const T_BBOX p_ymin = prior_data[pi + 1];
const T_BBOX p_xmax = prior_data[pi + 2];
const T_BBOX p_ymax = prior_data[pi + 3];
// Get prior box width and height
const T_BBOX prior_width = p_xmax - p_xmin;
const T_BBOX prior_height = p_ymax - p_ymin;
T_BBOX p_size;
if (i == 0 || i == 2)
{
p_size = prior_width;
}
else
{
p_size = prior_height;
}
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
}
// Exactly the same as CodeTypeSSD::CENTER_SIZE, but the variance is always used to adjust the bounding box decoding
else if (code_type == CodeTypeSSD::TF_CENTER)
{
const T_BBOX pXmin = prior_data[pi];
const T_BBOX pYmin = prior_data[pi + 1];
const T_BBOX pXmax = prior_data[pi + 2];
const T_BBOX pYmax = prior_data[pi + 3];
const T_BBOX priorWidth = pXmax - pXmin;
const T_BBOX priorHeight = pYmax - pYmin;
const T_BBOX priorCenterX = (pXmin + pXmax) / 2.;
const T_BBOX priorCenterY = (pYmin + pYmax) / 2.;
const T_BBOX ymin = loc_data[index - i];
const T_BBOX xmin = loc_data[index - i + 1];
const T_BBOX ymax = loc_data[index - i + 2];
const T_BBOX xmax = loc_data[index - i + 3];
T_BBOX bboxCenterX, bboxCenterY;
T_BBOX bboxWidth, bboxHeight;
bboxCenterX = prior_data[vi] * xmin * priorWidth + priorCenterX;
bboxCenterY = prior_data[vi + 1] * ymin * priorHeight + priorCenterY;
bboxWidth = exp(prior_data[vi + 2] * xmax) * priorWidth;
bboxHeight = exp(prior_data[vi + 3] * ymax) * priorHeight;
switch (i)
{
case 0:
bbox_data[index] = bboxCenterX - bboxWidth / 2.;
break;
case 1:
bbox_data[index] = bboxCenterY - bboxHeight / 2.;
break;
case 2:
bbox_data[index] = bboxCenterX + bboxWidth / 2.;
break;
case 3:
bbox_data[index] = bboxCenterY + bboxHeight / 2.;
break;
}
}
else
{
// Unknown code type.
assert("Unknown Box decode code type");
}
// Clip bounding box or not
if (clip_bbox)
{
bbox_data[index] = max(min(bbox_data[index], T_BBOX(1.)), T_BBOX(0.));
}
}
}
template <typename T_BBOX>
pluginStatus_t decodeBBoxes_gpu(
cudaStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const void* loc_data,
const void* prior_data,
void* bbox_data)
{
const int BS = 512;
const int GS = (nthreads + BS - 1) / BS;
decodeBBoxes_kernel<T_BBOX, BS><<<GS, BS, 0, stream>>>(nthreads, code_type, variance_encoded_in_target,
num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox,
(const T_BBOX*) loc_data, (const T_BBOX*) prior_data,
(T_BBOX*) bbox_data);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// decodeBBoxes LAUNCH CONFIG
typedef pluginStatus_t (*dbbFunc)(cudaStream_t,
const int,
const CodeTypeSSD,
const bool,
const int,
const bool,
const int,
const int,
const bool,
const void*,
const void*,
void*);
struct dbbLaunchConfig
{
DataType t_bbox;
dbbFunc function;
dbbLaunchConfig(DataType t_bbox)
: t_bbox(t_bbox)
{
}
dbbLaunchConfig(DataType t_bbox, dbbFunc function)
: t_bbox(t_bbox)
, function(function)
{
}
bool operator==(const dbbLaunchConfig& other)
{
return t_bbox == other.t_bbox;
}
};
static std::vector<dbbLaunchConfig> dbbFuncVec;
bool decodeBBoxesInit()
{
dbbFuncVec.push_back(dbbLaunchConfig(DataType::kFLOAT,
decodeBBoxes_gpu<float>));
return true;
}
static bool initialized = decodeBBoxesInit();
pluginStatus_t decodeBBoxes(
cudaStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const DataType DT_BBOX,
const void* loc_data,
const void* prior_data,
void* bbox_data)
{
dbbLaunchConfig lc = dbbLaunchConfig(DT_BBOX);
for (unsigned i = 0; i < dbbFuncVec.size(); ++i)
{
if (lc == dbbFuncVec[i])
{
DEBUG_PRINTF("decodeBBox kernel %d\n", i);
return dbbFuncVec[i].function(stream,
nthreads,
code_type,
variance_encoded_in_target,
num_priors,
share_location,
num_loc_classes,
background_label_id,
clip_bbox,
loc_data,
prior_data,
bbox_data);
}
}
return STATUS_BAD_PARAM;
}
|
607a332c0738a72d48ed820ad5748ac31ae2daa7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
* License: BSD 3
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^7
#define BLOCKS 8192 // 2^13
#define NUM_VALS (THREADS * BLOCKS)
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
float random_float()
{
return (float)rand() / (float)RAND_MAX;
}
void array_print(float *arr, int length)
{
int i;
for (i = 0; i < length; ++i)
{
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(float *arr, int length)
{
srand(time(NULL));
int i;
for (i = 0; i < length; ++i)
{
arr[i] = random_float();
}
}
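// One compare-exchange step of the bitonic network: thread i is paired with partner i XOR j, and bit (i & k) selects whether the pair is sorted ascending or descending.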
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i)
{
if ((i & k) == 0)
{
/* Sort ascending */
if (dev_values[i] > dev_values[ixj])
{
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0)
{
/* Sort descending */
if (dev_values[i] < dev_values[ixj])
{
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sort(float *values)
{
float *dev_values;
size_t size = NUM_VALS * sizeof(float);
hipMalloc((void **)&dev_values, size);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
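// k is the size of the bitonic sequences being merged (doubles every major step); j is the compare distance within a step (halves every minor step), giving O(log^2 NUM_VALS) kernel launches.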
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1)
{
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1)
{
hipLaunchKernelGGL(( bitonic_sort_step), dim3(blocks), dim3(threads), 0, 0, dev_values, j, k);
}
}
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
}
void writefile(char *filename, float *buffer, int num)
{
FILE *fp;
fp = fopen(filename, "w");
for (int j = 0; j < num; j++)
{
fprintf(fp, "%0.0f\n", *(buffer + j));
}
fclose(fp);
}
int main(int argc, char *argv[])
{
clock_t start, stop;
if (argc != 3)
{
printf("Invalid argument count. %s accepts 1-4 arguments, %d given\n",
argv[0], argc);
return -1;
}
float *values = (float *)malloc(NUM_VALS * sizeof(float));
// array_fill(values, NUM_VALS);
if (values == NULL)
{
printf("Insufficient host memory to allocate at %d", __LINE__);
return -3;
}
start = clock();
FILE *fin = fopen(argv[1], "r");
for (int i = 0; i < NUM_VALS; i++)
{
if (EOF == fscanf(fin, "%f ", &values[i]))
{
break;
}
}
fclose(fin);
bitonic_sort(values); /* Inplace */
writefile(argv[2], values, NUM_VALS);
stop = clock();
print_elapsed(start, stop);
free(values);
}
|
607a332c0738a72d48ed820ad5748ac31ae2daa7.cu
|
/*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
* License: BSD 3
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^7
#define BLOCKS 8192 // 2^13
#define NUM_VALS (THREADS * BLOCKS)
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
float random_float()
{
return (float)rand() / (float)RAND_MAX;
}
void array_print(float *arr, int length)
{
int i;
for (i = 0; i < length; ++i)
{
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(float *arr, int length)
{
srand(time(NULL));
int i;
for (i = 0; i < length; ++i)
{
arr[i] = random_float();
}
}
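// One compare-exchange step of the bitonic network: thread i is paired with partner i XOR j, and bit (i & k) selects whether the pair is sorted ascending or descending.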
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i)
{
if ((i & k) == 0)
{
/* Sort ascending */
if (dev_values[i] > dev_values[ixj])
{
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0)
{
/* Sort descending */
if (dev_values[i] < dev_values[ixj])
{
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sort(float *values)
{
float *dev_values;
size_t size = NUM_VALS * sizeof(float);
cudaMalloc((void **)&dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
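// k is the size of the bitonic sequences being merged (doubles every major step); j is the compare distance within a step (halves every minor step), giving O(log^2 NUM_VALS) kernel launches.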
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1)
{
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1)
{
bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k);
}
}
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
void writefile(char *filename, float *buffer, int num)
{
FILE *fp;
fp = fopen(filename, "w");
for (int j = 0; j < num; j++)
{
fprintf(fp, "%0.0f\n", *(buffer + j));
}
fclose(fp);
}
int main(int argc, char *argv[])
{
clock_t start, stop;
if (argc != 3)
{
printf("Invalid argument count. %s accepts 1-4 arguments, %d given\n",
argv[0], argc);
return -1;
}
float *values = (float *)malloc(NUM_VALS * sizeof(float));
// array_fill(values, NUM_VALS);
if (values == NULL)
{
printf("Insufficient host memory to allocate at %d", __LINE__);
return -3;
}
start = clock();
FILE *fin = fopen(argv[1], "r");
for (int i = 0; i < NUM_VALS; i++)
{
if (EOF == fscanf(fin, "%f ", &values[i]))
{
break;
}
}
fclose(fin);
bitonic_sort(values); /* Inplace */
writefile(argv[2], values, NUM_VALS);
stop = clock();
print_elapsed(start, stop);
free(values);
}
|
cb5f7fc506b63f1fa48e059ffc144c88783b2558.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include "err_handler.h"
#include "filter_db.h"
#include "util.h"
#include <sys/time.h>
#include <fstream>
#include <iostream>
#include <hip/hip_runtime.h>
#define BUF_SZ 256
using namespace std;
clsFilterDB db;
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), __FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
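// Warp-level sum using shuffle-down: after log2(warpSize) halving steps, lane 0 of the warp holds the sum of all lanes.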
__inline__ __device__ int warpReduceSum(int val)
{
for(int offset = warpSize/2; offset > 0; offset /= 2)
{
val += __shfl_down(val, offset);
//printf("%d\t", val);
}
return val;
}
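// Block-level sum: each warp reduces its own values, lane 0 of every warp stores its partial sum in shared memory, and the first warp then reduces those partials; the block total ends up in thread 0.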
__inline__ __device__ int blockReduceSum(int val)
{
static __shared__ int shared[64];
//static __shared__ int shared[12*1024];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val); // warp partial sum
if(lane == 0) shared[wid] = val; // store each warp partial sum
__syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if(wid==0) val = warpReduceSum(val);
return val;
}
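// Mask a 32-bit address down to its most-significant len bits (the network prefix); len == 0 yields the zero prefix.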
__inline__ __host__ __device__ UINT64 genPrefix(UINT64 prefix, unsigned char len)
{
register UINT64 result;
register UINT mask_m;
if (len == 0)
return(0);
else{
// mask = 0xFFFFFFFFFFFFFFFFLL << (64-len);
mask_m = 0xFFFFFFFF << (32-len);
result = prefix & mask_m;
return(result);
}
}
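// Wall-clock time in microseconds (despite the name).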
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec*1e6 + (double)tp.tv_usec);
}
void bubble_sort(clsFilterDB item, int len)
{
int i, j;
filter temp;
for(i = 0; i < len - 1; i++)
for(j = 0; j < len - i - 1; j++)
{
if(item.filterSet_[j].destIPLen_ > item.filterSet_[j+1].destIPLen_)
{
//printf("1.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
temp = item.filterSet_[j];
item.filterSet_[j] = item.filterSet_[j+1];
item.filterSet_[j+1] = temp;
//printf("2.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
}
if(item.filterSet_[j].srcIPLen_ > item.filterSet_[j+1].srcIPLen_)
{
//printf("1.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
temp = item.filterSet_[j];
item.filterSet_[j] = item.filterSet_[j+1];
item.filterSet_[j+1] = temp;
//printf("2.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
}
}
}
// Approach 0: distribute the (unsorted) filters to threads in contiguous chunks; each filter is checked against the whole rule set.
__global__ void gpu_conflict_detect_0(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
unsigned short threadID = blockDim.x * blockIdx.x + threadIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
unsigned short base = rule_size/threadSize+1;
unsigned short start = threadID*base;
unsigned int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(start > rule_size) return;
//int k = 0;
for (unsigned short i=start; i<(start+base) && i<rule_size ; i++) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//k = rule_size - i;
#pragma unroll
for (unsigned short j= 0; j<rule_size; j++) {
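// Roughly: a pair counts as a conflict when filter i's source prefix is strictly longer than and nested inside filter j's, the protocols are compatible, both port ranges overlap, and the destination prefixes are nested or identical; certain cases where filter i fully covers filter j's ports and protocol are excluded.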
//if(j>=i) return;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
}
}
}
}
}
}
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
// Approach 1: distribute the sorted filters to threads in contiguous chunks; each filter is checked only against lower-indexed filters (j < i).
__global__ void gpu_conflict_detect_1(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
unsigned short threadID = blockDim.x * blockIdx.x + threadIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
unsigned short base = rule_size/threadSize+1;
unsigned short start = threadID*base;
unsigned int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(start > rule_size) return;
//int k = 0;
for (unsigned short i=start; i<(start+base) && i<rule_size ; i++) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//k = rule_size - i;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) return;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
}
}
}
}
}
}
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
// Based on the scheme above, but filters are assigned to threads in a strided (interleaved) order. (approach 1)
__global__ void gpu_conflict_detect_2(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
///printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
unsigned short start = threadIdx.x * gridDim.x + blockIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(start > rule_size) return;
for (unsigned short i=start; i<rule_size; i+=threadSize) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[threadIndex].set = 1;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) break;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
}
}
}
}
}
}
//printf("%d\t", total);
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
// based on the new (sorted) order, distribute filters to threads by work. (approach 2)
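// Work balancing: with T = gridDim.x * blockDim.x threads and base = 2*T, thread t handles
// rows t, t+base, t+2*base, ... in the first loop and rows (base-1-t), (base-1-t)+base, ...
// in the second, pairing a cheap (small i) row with an expensive (large i) row.
// Example with T = 2 (base = 4): thread 0 takes rows 0,4,8,... and 3,7,11,...;
// thread 1 takes rows 1,5,9,... and 2,6,10,...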
__global__ void gpu_conflict_detect_3(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
//unsigned short start = threadIdx.x * gridDim.x + blockIdx.x;
unsigned short threadID = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
unsigned short base = threadSize * 2;
unsigned short start = (base - 1) - threadID;
unsigned int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(threadID > rule_size) return;
for (unsigned short i=threadID; i<rule_size; i+=base) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[threadIndex].set = 1;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) break;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
}
}
}
}
}
}
//if(start > rule_size) return;
for (unsigned short i=start; i<rule_size; i+=base) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[threadIndex].set = 1;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) break;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
}
}
}
}
}
}
//printf("%d\t", total);
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
/*
__global__ void gpu_conflict_detect_2(const filter * __restrict__ filterSet, int rule_size, dev_var* devTotal)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
int threadID = threadIdx.x * gridDim.x + blockIdx.x;
int threadSize = gridDim.x * blockDim.x;
int total = 0;
//devTotal[threadID].set = 1;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
for (int i=threadID; i<rule_size; i+=threadSize) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[i].result = 0;
for (int j= 0; j<rule_size ; j++) {
if( __ldg(&filterSet[i].srcIPLen_) > __ldg(&filterSet[j].srcIPLen_) ){
if( genPrefix(__ldg(&filterSet[i].srcIP_), __ldg(&filterSet[j].srcIPLen_)) == __ldg(&filterSet[j].srcIP_) ){
if(( (!__ldg(&filterSet[i].pro_num_) || !__ldg(&filterSet[j].pro_num_)) || (__ldg(&filterSet[i].pro_num_) == __ldg(&filterSet[j].pro_num_) )) ) {
if (
(
((__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortright_)) && (__ldg(&filterSet[i].destPortleft_) >= __ldg(&filterSet[j].destPortleft_))) ||
((__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortleft_)) && (__ldg(&filterSet[i].destPortright_) <= __ldg(&filterSet[j].destPortright_))) ||
((__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortright_)) && (__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortleft_)))
)
&&
(
((__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortright_)) && (__ldg(&filterSet[i].srcPortleft_) >= __ldg(&filterSet[j].srcPortleft_))) ||
((__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortleft_)) && (__ldg(&filterSet[i].srcPortright_) <= __ldg(&filterSet[j].srcPortright_))) ||
((__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortright_)) && (__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortleft_)))
)
)
{
if (__ldg(&filterSet[i].destIPLen_) > __ldg(&filterSet[j].destIPLen_) ){
if ( genPrefix(__ldg(&filterSet[i].destIP_), __ldg(&filterSet[j].destIPLen_)) == __ldg(&filterSet[j].destIP_)){
//counter1++;
if(
( (__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortleft_)) && (__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortright_)) ) &&
( (__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortleft_)) && (__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortright_)) ) &&
( (__ldg(&filterSet[i].pro_num_) == __ldg(&filterSet[j].pro_num_)) || (__ldg(&filterSet[i].pro_num_) == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (__ldg(&filterSet[i].destIPLen_) < __ldg(&filterSet[j].destIPLen_)){
if ( genPrefix(__ldg(&filterSet[j].destIP_), __ldg(&filterSet[i].destIPLen_)) == __ldg(&filterSet[i].destIP_)){
//counter2++;
//if(
//( (rule.__ldg(&filterSet_[i].destPortleft_) <= rule.__ldg(&filterSet_[j].destPortleft_)) && (rule.__ldg(&filterSet_[i].destPortright_) >= rule.__ldg(&filterSet_[j].destPortright_)) ) &&
//( (rule.__ldg(&filterSet_[i].srcPortleft_) <= rule.__ldg(&filterSet_[j].srcPortleft_)) && (rule.__ldg(&filterSet_[i].srcPortright_) >= rule.__ldg(&filterSet_[j].srcPortright_)) ) &&
//( (rule.__ldg(&filterSet_[i].pro_num_) == rule.__ldg(&filterSet_[j].pro_num_)) || (rule.__ldg(&filterSet_[i].pro_num_) == 0) )
//){}
//else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (__ldg(&filterSet[i].destIPLen_) == __ldg(&filterSet[j].destIPLen_)){
if ( __ldg(&filterSet[i].destIP_) == __ldg(&filterSet[j].destIP_)){
//counter3++;
if(
( (__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortleft_)) && (__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortright_)) ) &&
( (__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortleft_)) && (__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortright_)) ) &&
( (__ldg(&filterSet[i].pro_num_) == __ldg(&filterSet[j].pro_num_)) || (__ldg(&filterSet[i].pro_num_) == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
}
}
}
}
}
}
//devTotal[threadID].set = 1;
//devTotal[threadID].result += total;
//__syncthreads();
//devTotal[threadIdx.x].set = 1;
//devTotal[threadIdx.x].result += shm[threadIdx.x];
//printf("%d\t", devTotal[threadIdx.x].result);
}
*/
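// CPU reference: the same O(n^2) pairwise conflict test, run single-threaded and used as a
// baseline for checking the GPU counts and timings.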
void host_conflict_detect(filter *filterSet, int rule_size, int &total)
{
for (int i=0; i<rule_size; i++) {
for (int j= 0; j<rule_size; j++) {
if( (filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ) && ( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ) ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
total++;
}
}
}
}
}
}
}
}
int main(int argc, char *argv[])
{
ofstream fout;
//fout.open("0727M2.txt", ios::app);
double sort_start, hconflict_start, g1conflict_start, g2conflict_start, g0conflict_start, g3conflict_start, g3copy1_start, g3copy2_start;
double sort_time, hconflict_time, g1conflict_time, g2conflict_time, g0conflict_time, g3conflict_time, g3copy1_time, g3copy2_time;
int db_size;
int total = 0; // for host computation result
int *d_total;// store device result
int *h_total;// store host result
unsigned long sum = 0;
// CUDA variable
filter* dev_filterSet; // store filter in device
//insert filters
if (db.loadFilters(argv[1]) == -1)
err_quit("Error: unable to open the filter database.");
db_size = db.size(); // get database size
printf("number of rules: %d\n", db_size);
int block = 512;
int grid = 8;
//int grid =( db_size + block - 1)/block;
//if(grid == 0)
//grid = 1;
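// Fixed launch configuration: 8 blocks x 512 threads = 4096 threads. Note the kernels index
// rules with unsigned short, so rule_size must stay below 65536 for the index math to hold.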
// set to 0 for copying to device variable
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
//hipSetDevice(0);
// ################ Prepare and call M0 function ###################
// prepare device variable
hipMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
hipMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
hipMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function
g0conflict_start = cpuSecond();
// copy to device
hipMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), hipMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy1 fail");
hipLaunchKernelGGL(( gpu_conflict_detect_0), dim3(grid), dim3(block), 0, 0, dev_filterSet, db_size, d_total);
//hipDeviceSynchronize();
// copy device result to host
hipMemcpy(h_total, d_total, grid*sizeof(int), hipMemcpyDeviceToHost);
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
g0conflict_time = cpuSecond() - g0conflict_start;
cout << "m0_detection_time: " << g0conflict_time << " usec" << endl;
printf("m0 conflict detection: %lu\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g0conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
hipFree(dev_filterSet);
hipFree(d_total);
hipDeviceReset();
// sort filters by srcIPLen & destIPLen
sort_start = cpuSecond();
bubble_sort(db, db_size);
sort_time = cpuSecond() - sort_start;
//cout << "sort_time: " << sort_time << " usec" << endl;
/*
// ################ Prepare and call M1 function ###################
// prepare device variable
hipMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
hipMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
hipMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function
g1conflict_start = cpuSecond();
// copy to device
hipMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), hipMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy1 fail");
gpu_conflict_detect_1<<<grid, block>>>(dev_filterSet, db_size, d_total);
//hipDeviceSynchronize();
// copy device result to host
hipMemcpy(h_total, d_total, grid*sizeof(int), hipMemcpyDeviceToHost);
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
g1conflict_time = cpuSecond() - g1conflict_start;
cout << "m1_detection_time: " << g1conflict_time << " usec" << endl;
printf("m1 conflict detection: %d\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g1conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
hipFree(dev_filterSet);
hipFree(d_total);
hipDeviceReset();
*/
// ################ Prepare and call M2 function ###################
// prepare device variable
hipMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
hipMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
hipMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function method 2
g2conflict_start = cpuSecond();
// copy to device
hipMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), hipMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy1 fail");
hipLaunchKernelGGL(( gpu_conflict_detect_2), dim3(grid), dim3(block), 0, 0, dev_filterSet, db_size, d_total);
//hipDeviceSynchronize();
// copy device result to host
hipMemcpy(h_total, d_total, grid*sizeof(int), hipMemcpyDeviceToHost);
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
g2conflict_time = cpuSecond() - g2conflict_start;
cout << "m2_detection_time: " << g2conflict_time << " usec" << endl;
printf("m2 conflict detection: %lu\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g2conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
hipFree(dev_filterSet);
hipFree(d_total);
hipDeviceReset();
// ################ Prepare and call M3 function ###################
// prepare device variable
hipMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
hipMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
hipMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function
//g3conflict_start = cpuSecond();
g3copy1_start = cpuSecond();
// copy to device
hipMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), hipMemcpyHostToDevice);
g3copy1_time = cpuSecond() - g3copy1_start;
cudaCheckErrors("cudaMemcpy1 fail");
g3conflict_start = cpuSecond();
hipLaunchKernelGGL(( gpu_conflict_detect_3), dim3(grid), dim3(block), 0, 0, dev_filterSet, db_size, d_total);
//hipDeviceSynchronize();
g3conflict_time = cpuSecond() - g3conflict_start;
g3copy2_start = cpuSecond();
// copy device result to host
hipMemcpy(h_total, d_total, grid*sizeof(int), hipMemcpyDeviceToHost);
g3copy2_time = cpuSecond() - g3copy2_start;
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
cout << "m3_copy1_time: " << g3copy1_time << " usec" << endl;
cout << "m3_detection_time: " << g3conflict_time << " usec" << endl;
cout << "m3_copy2_time: " << g3copy2_time << " usec" << endl;
printf("m3 conflict detection: %d\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g1conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
hipFree(dev_filterSet);
hipFree(d_total);
hipDeviceReset();
// ################ Prepare and call host function ###################
hconflict_start = cpuSecond();
host_conflict_detect(db.filterSet_, db_size, total);
hconflict_time = cpuSecond() - hconflict_start;
cout.setf(ios::fixed);
cout << std::noshowpoint << "host_detection_time: " << hconflict_time << " usec" << endl;
printf("Host conflict detection: %d\n", total);
cout<<endl;
fout.open("0818_5128_fw5_5.txt", ios::app);
fout<< hconflict_time<< "\t";
fout<< g0conflict_time<< "\t";
//fout<< g1conflict_time<< "\t";
fout<< g2conflict_time<< "\t";
fout<< g3conflict_time<< "\n";
fout.close();
//free(cpy_dev);
//hipFree(dev_filterSet);
//hipFree(dev_total);
}
|
cb5f7fc506b63f1fa48e059ffc144c88783b2558.cu
|
#include <stdio.h>
#include <string.h>
#include "err_handler.h"
#include "filter_db.h"
#include "util.h"
#include <sys/time.h>
#include <fstream>
#include <iostream>
#include <cuda_runtime.h>
#define BUF_SZ 256
using namespace std;
clsFilterDB db;
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), __FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
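// Warp-level sum: repeatedly fold the upper half of the warp onto the lower half with
// __shfl_down; lane 0 ends up holding the full warp sum. (__shfl_down is the pre-CUDA-9
// intrinsic; newer toolkits would use __shfl_down_sync.)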
__inline__ __device__ int warpReduceSum(int val)
{
for(int offset = warpSize/2; offset > 0; offset /= 2)
{
val += __shfl_down(val, offset);
//printf("%d\t", val);
}
return val;
}
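// Block-level sum: reduce inside each warp, store one partial per warp in shared memory
// (room for 64 warps), then let the first warp reduce the partials. Only threadIdx.x == 0
// holds the final value, which is why callers write d_total[blockIdx.x] from that thread.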
__inline__ __device__ int blockReduceSum(int val)
{
static __shared__ int shared[64];
//static __shared__ int shared[12*1024];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val); // warp partial sum
if(lane == 0) shared[wid] = val; // store each warp partial sum
__syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if(wid==0) val = warpReduceSum(val);
return val;
}
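// Masks an address down to its first `len` bits so prefixes can be compared for containment.
// The active mask is 32-bit, i.e. this assumes IPv4-length prefixes; the 64-bit mask variant
// is left commented out in the body.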
__inline__ __host__ __device__ UINT64 genPrefix(UINT64 prefix, unsigned char len)
{
register UINT64 result;
register UINT mask_m;
if (len == 0)
return(0);
else{
// mask = 0xFFFFFFFFFFFFFFFFLL << (64-len);
mask_m = 0xFFFFFFFF << (32-len);
result = prefix & mask_m;
return(result);
}
}
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec*1e6 + (double)tp.tv_usec);
}
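// Bubble sort on the filter set, swapping on destIPLen_ and then srcIPLen_ within the same
// pass. The intent appears to be to order rules by ascending prefix length so that the
// sorted kernels only need to test pairs with j < i.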
void bubble_sort(clsFilterDB item, int len)
{
int i, j;
filter temp;
for(i = 0; i < len - 1; i++)
for(j = 0; j < len - i - 1; j++)
{
if(item.filterSet_[j].destIPLen_ > item.filterSet_[j+1].destIPLen_)
{
//printf("1.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
temp = item.filterSet_[j];
item.filterSet_[j] = item.filterSet_[j+1];
item.filterSet_[j+1] = temp;
//printf("2.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
}
if(item.filterSet_[j].srcIPLen_ > item.filterSet_[j+1].srcIPLen_)
{
//printf("1.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
temp = item.filterSet_[j];
item.filterSet_[j] = item.filterSet_[j+1];
item.filterSet_[j+1] = temp;
//printf("2.srcIP: %d\n", item.filterSet_[j].srcIPLen_);
}
}
}
// distribute filters before sorting, to threads by order. (approach 0)
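// Each thread takes a contiguous chunk of roughly rule_size / (total threads) rows and
// compares each row against the full rule set (j runs over all rules), since this kernel
// is launched before the filters are sorted.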
__global__ void gpu_conflict_detect_0(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
unsigned short threadID = blockDim.x * blockIdx.x + threadIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
unsigned short base = rule_size/threadSize+1;
unsigned short start = threadID*base;
unsigned int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(start > rule_size) return;
//int k = 0;
for (unsigned short i=start; i<(start+base) && i<rule_size ; i++) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//k = rule_size - i;
#pragma unroll
for (unsigned short j= 0; j<rule_size; j++) {
//if(j>=i) return;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
}
}
}
}
}
}
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
// distribute filters after sorting, to threads by order. (approach 1)
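// Same chunked row assignment as approach 0, but after sorting only pairs with j < i need
// to be tested, so threads owning later chunks presumably do much more work than earlier ones.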
__global__ void gpu_conflict_detect_1(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
unsigned short threadID = blockDim.x * blockIdx.x + threadIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
unsigned short base = rule_size/threadSize+1;
unsigned short start = threadID*base;
unsigned int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(start > rule_size) return;
//int k = 0;
for (unsigned short i=start; i<(start+base) && i<rule_size ; i++) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//k = rule_size - i;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) return;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
}
}
}
}
}
}
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
// based on the new (sorted) order, distribute filters to threads by order. (approach 1)
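// Interleaved assignment: each thread starts at (threadIdx.x * gridDim.x + blockIdx.x) and
// strides by the total thread count, which presumably balances the triangular j < i workload
// better than contiguous chunks.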
__global__ void gpu_conflict_detect_2(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
///printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
unsigned short start = threadIdx.x * gridDim.x + blockIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(start > rule_size) return;
for (unsigned short i=start; i<rule_size; i+=threadSize) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[threadIndex].set = 1;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) break;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
}
}
}
}
}
}
//printf("%d\t", total);
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
// based on the new (sorted) order, distribute filters to threads by work. (approach 2)
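// Work balancing: with base = 2 * (total threads), thread t walks rows t, t+base, ... and
// also rows (base-1-t), (base-1-t)+base, ..., pairing cheap early rows with expensive late
// rows so per-thread work is roughly evened out.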
__global__ void gpu_conflict_detect_3(filter * const __restrict__ filterSet, int rule_size, int* d_total)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
//unsigned short start = threadIdx.x * gridDim.x + blockIdx.x;
unsigned short threadID = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short threadSize = gridDim.x * blockDim.x;
unsigned short base = threadSize * 2;
unsigned short start = (base - 1) - threadID;
unsigned int total = 0;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
if(threadID > rule_size) return;
for (unsigned short i=threadID; i<rule_size; i+=base) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[threadIndex].set = 1;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) break;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
}
}
}
}
}
}
//if(start > rule_size) return;
for (unsigned short i=start; i<rule_size; i+=base) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[threadIndex].set = 1;
#pragma unroll
for (unsigned short j= 0; j<i; j++) {
//if(j>=i) break;
if( filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ){
if( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
//devTotal[threadIndex].result++;
//shm[threadIndex]++;
total++;
}
}
}
}
}
}
}
}
//printf("%d\t", total);
total = blockReduceSum(total);
if(threadIdx.x == 0)
d_total[blockIdx.x] = total;
}
/*
__global__ void gpu_conflict_detect_2(const filter * __restrict__ filterSet, int rule_size, dev_var* devTotal)
{
//printf("blockIdx.x: %d, threadIdx.x: %d\n", blockIdx.x, threadIdx.x);
int threadID = threadIdx.x * gridDim.x + blockIdx.x;
int threadSize = gridDim.x * blockDim.x;
int total = 0;
//devTotal[threadID].set = 1;
//printf("threadSize:%d,%d\n", threadSize, rule_size/threadSize + 1);
for (int i=threadID; i<rule_size; i+=threadSize) {
//for(int i=threadID; i<rule_size; i+=threadSize){
//devTotal[i].result = 0;
for (int j= 0; j<rule_size ; j++) {
if( __ldg(&filterSet[i].srcIPLen_) > __ldg(&filterSet[j].srcIPLen_) ){
if( genPrefix(__ldg(&filterSet[i].srcIP_), __ldg(&filterSet[j].srcIPLen_)) == __ldg(&filterSet[j].srcIP_) ){
if(( (!__ldg(&filterSet[i].pro_num_) || !__ldg(&filterSet[j].pro_num_)) || (__ldg(&filterSet[i].pro_num_) == __ldg(&filterSet[j].pro_num_) )) ) {
if (
(
((__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortright_)) && (__ldg(&filterSet[i].destPortleft_) >= __ldg(&filterSet[j].destPortleft_))) ||
((__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortleft_)) && (__ldg(&filterSet[i].destPortright_) <= __ldg(&filterSet[j].destPortright_))) ||
((__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortright_)) && (__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortleft_)))
)
&&
(
((__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortright_)) && (__ldg(&filterSet[i].srcPortleft_) >= __ldg(&filterSet[j].srcPortleft_))) ||
((__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortleft_)) && (__ldg(&filterSet[i].srcPortright_) <= __ldg(&filterSet[j].srcPortright_))) ||
((__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortright_)) && (__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortleft_)))
)
)
{
if (__ldg(&filterSet[i].destIPLen_) > __ldg(&filterSet[j].destIPLen_) ){
if ( genPrefix(__ldg(&filterSet[i].destIP_), __ldg(&filterSet[j].destIPLen_)) == __ldg(&filterSet[j].destIP_)){
//counter1++;
if(
( (__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortleft_)) && (__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortright_)) ) &&
( (__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortleft_)) && (__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortright_)) ) &&
( (__ldg(&filterSet[i].pro_num_) == __ldg(&filterSet[j].pro_num_)) || (__ldg(&filterSet[i].pro_num_) == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (__ldg(&filterSet[i].destIPLen_) < __ldg(&filterSet[j].destIPLen_)){
if ( genPrefix(__ldg(&filterSet[j].destIP_), __ldg(&filterSet[i].destIPLen_)) == __ldg(&filterSet[i].destIP_)){
//counter2++;
//if(
//( (rule.__ldg(&filterSet_[i].destPortleft_) <= rule.__ldg(&filterSet_[j].destPortleft_)) && (rule.__ldg(&filterSet_[i].destPortright_) >= rule.__ldg(&filterSet_[j].destPortright_)) ) &&
//( (rule.__ldg(&filterSet_[i].srcPortleft_) <= rule.__ldg(&filterSet_[j].srcPortleft_)) && (rule.__ldg(&filterSet_[i].srcPortright_) >= rule.__ldg(&filterSet_[j].srcPortright_)) ) &&
//( (rule.__ldg(&filterSet_[i].pro_num_) == rule.__ldg(&filterSet_[j].pro_num_)) || (rule.__ldg(&filterSet_[i].pro_num_) == 0) )
//){}
//else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
if (__ldg(&filterSet[i].destIPLen_) == __ldg(&filterSet[j].destIPLen_)){
if ( __ldg(&filterSet[i].destIP_) == __ldg(&filterSet[j].destIP_)){
//counter3++;
if(
( (__ldg(&filterSet[i].destPortleft_) <= __ldg(&filterSet[j].destPortleft_)) && (__ldg(&filterSet[i].destPortright_) >= __ldg(&filterSet[j].destPortright_)) ) &&
( (__ldg(&filterSet[i].srcPortleft_) <= __ldg(&filterSet[j].srcPortleft_)) && (__ldg(&filterSet[i].srcPortright_) >= __ldg(&filterSet[j].srcPortright_)) ) &&
( (__ldg(&filterSet[i].pro_num_) == __ldg(&filterSet[j].pro_num_)) || (__ldg(&filterSet[i].pro_num_) == 0) )
){}
else
//devTotal++;
//devTotal[threadID].result++;
total++;
}
}
}
}
}
}
}
}
//devTotal[threadID].set = 1;
//devTotal[threadID].result += total;
//__syncthreads();
//devTotal[threadIdx.x].set = 1;
//devTotal[threadIdx.x].result += shm[threadIdx.x];
//printf("%d\t", devTotal[threadIdx.x].result);
}
*/
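// CPU reference implementation of the pairwise conflict test, used as the host baseline in main().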
void host_conflict_detect(filter *filterSet, int rule_size, int &total)
{
for (int i=0; i<rule_size; i++) {
for (int j= 0; j<rule_size; j++) {
if( (filterSet[i].srcIPLen_ > filterSet[j].srcIPLen_ ) && ( genPrefix(filterSet[i].srcIP_, filterSet[j].srcIPLen_) == filterSet[j].srcIP_ ) ){
if(( (!filterSet[i].pro_num_ || !filterSet[j].pro_num_) || (filterSet[i].pro_num_ == filterSet[j].pro_num_ )) ) {
if (
(
((filterSet[i].destPortleft_ <= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ >= filterSet[j].destPortleft_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ <= filterSet[j].destPortright_)) ||
((filterSet[i].destPortright_ >= filterSet[j].destPortright_) && (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_))
)
&&
(
((filterSet[i].srcPortleft_ <= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ >= filterSet[j].srcPortleft_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ <= filterSet[j].srcPortright_)) ||
((filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) && (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_))
)
)
{
if (filterSet[i].destIPLen_ > filterSet[j].destIPLen_ ){
if ( genPrefix(filterSet[i].destIP_, filterSet[j].destIPLen_) == filterSet[j].destIP_){
//counter1++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
total++;
}
}
if (filterSet[i].destIPLen_ < filterSet[j].destIPLen_){
if ( genPrefix(filterSet[j].destIP_, filterSet[i].destIPLen_) == filterSet[i].destIP_){
//counter2++;
/*
if(
( (rule.filterSet_[i].destPortleft_ <= rule.filterSet_[j].destPortleft_) && (rule.filterSet_[i].destPortright_ >= rule.filterSet_[j].destPortright_) ) &&
( (rule.filterSet_[i].srcPortleft_ <= rule.filterSet_[j].srcPortleft_) && (rule.filterSet_[i].srcPortright_ >= rule.filterSet_[j].srcPortright_) ) &&
( (rule.filterSet_[i].pro_num_ == rule.filterSet_[j].pro_num_) || (rule.filterSet_[i].pro_num_ == 0) )
){}
else
*/
total++;
}
}
if (filterSet[i].destIPLen_ == filterSet[j].destIPLen_){
if ( filterSet[i].destIP_ == filterSet[j].destIP_){
//counter3++;
if(
( (filterSet[i].destPortleft_ <= filterSet[j].destPortleft_) && (filterSet[i].destPortright_ >= filterSet[j].destPortright_) ) &&
( (filterSet[i].srcPortleft_ <= filterSet[j].srcPortleft_) && (filterSet[i].srcPortright_ >= filterSet[j].srcPortright_) ) &&
( (filterSet[i].pro_num_ == filterSet[j].pro_num_) || (filterSet[i].pro_num_ == 0) )
){}
else
total++;
}
}
}
}
}
}
}
}
int main(int argc, char *argv[])
{
ofstream fout;
//fout.open("0727M2.txt", ios::app);
double sort_start, hconflict_start, g1conflict_start, g2conflict_start, g0conflict_start, g3conflict_start, g3copy1_start, g3copy2_start;
double sort_time, hconflict_time, g1conflict_time, g2conflict_time, g0conflict_time, g3conflict_time, g3copy1_time, g3copy2_time;
int db_size;
int total = 0; // for host computation result
int *d_total;// store device result
int *h_total;// store host result
unsigned long sum = 0;
// CUDA variable
filter* dev_filterSet; // store filter in device
//insert filters
if (db.loadFilters(argv[1]) == -1)
err_quit("Error: unable to open the filter database.");
db_size = db.size(); // get database size
printf("number of rules: %d\n", db_size);
int block = 512;
int grid = 8;
//int grid =( db_size + block - 1)/block;
//if(grid == 0)
//grid = 1;
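// 8 blocks x 512 threads = 4096 threads; the unsigned short indices in the kernels assume
// rule_size < 65536.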
// set to 0 for copying to device variable
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
//cudaSetDevice(0);
// ################ Prepare and call M0 function ###################
// prepare device variable
cudaMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
cudaMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
cudaMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function
g0conflict_start = cpuSecond();
// copy to device
cudaMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy1 fail");
gpu_conflict_detect_0<<<grid, block>>>(dev_filterSet, db_size, d_total);
//cudaDeviceSynchronize();
// copy device result to host
cudaMemcpy(h_total, d_total, grid*sizeof(int), cudaMemcpyDeviceToHost);
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
g0conflict_time = cpuSecond() - g0conflict_start;
cout << "m0_detection_time: " << g0conflict_time << " usec" << endl;
printf("m0 conflict detection: %lu\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g0conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
cudaFree(dev_filterSet);
cudaFree(d_total);
cudaDeviceReset();
// sort filters by srcIPLen & destIPLen
sort_start = cpuSecond();
bubble_sort(db, db_size);
sort_time = cpuSecond() - sort_start;
//cout << "sort_time: " << sort_time << " usec" << endl;
/*
// ################ Prepare and call M1 function ###################
// prepare device variable
cudaMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
cudaMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
cudaMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function
g1conflict_start = cpuSecond();
// copy to device
cudaMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy1 fail");
gpu_conflict_detect_1<<<grid, block>>>(dev_filterSet, db_size, d_total);
//cudaDeviceSynchronize();
// copy device result to host
cudaMemcpy(h_total, d_total, grid*sizeof(int), cudaMemcpyDeviceToHost);
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
g1conflict_time = cpuSecond() - g1conflict_start;
cout << "m1_detection_time: " << g1conflict_time << " usec" << endl;
printf("m1 conflict detection: %d\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g1conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
cudaFree(dev_filterSet);
cudaFree(d_total);
cudaDeviceReset();
*/
// ################ Prepare and call M2 function ###################
// prepare device variable
cudaMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
cudaMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
cudaMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function method 2
g2conflict_start = cpuSecond();
// copy to device
cudaMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy1 fail");
gpu_conflict_detect_2<<<grid, block>>>(dev_filterSet, db_size, d_total);
//cudaDeviceSynchronize();
// copy device result to host
cudaMemcpy(h_total, d_total, grid*sizeof(int), cudaMemcpyDeviceToHost);
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
g2conflict_time = cpuSecond() - g2conflict_start;
cout << "m2_detection_time: " << g2conflict_time << " usec" << endl;
printf("m2 conflict detection: %lu\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g2conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
cudaFree(dev_filterSet);
cudaFree(d_total);
cudaDeviceReset();
// ################ Prepare and call M3 function ###################
// prepare device variable
cudaMalloc((void**)&dev_filterSet, db_size * sizeof(filter));
cudaCheckErrors("cudaMalloc1 fail");
cudaMalloc((void**)&d_total, grid*sizeof(int));
cudaCheckErrors("cudaMalloc2 fail");
cudaMemset(d_total, 0, grid*sizeof(int));
h_total = (int*)malloc(grid*sizeof(int));
// call kernel function
//g3conflict_start = cpuSecond();
g3copy1_start = cpuSecond();
// copy to device
cudaMemcpy(dev_filterSet, db.filterSet_, db_size * sizeof(filter), cudaMemcpyHostToDevice);
g3copy1_time = cpuSecond() - g3copy1_start;
cudaCheckErrors("cudaMemcpy1 fail");
g3conflict_start = cpuSecond();
gpu_conflict_detect_3<<<grid, block>>>(dev_filterSet, db_size, d_total);
//cudaDeviceSynchronize();
g3conflict_time = cpuSecond() - g3conflict_start;
g3copy2_start = cpuSecond();
// copy device result to host
cudaMemcpy(h_total, d_total, grid*sizeof(int), cudaMemcpyDeviceToHost);
g3copy2_time = cpuSecond() - g3copy2_start;
sum = 0;
for(int tmp = 0; tmp < grid; tmp++)
sum += h_total[tmp];
cout << "m3_copy1_time: " << g3copy1_time << " usec" << endl;
cout << "m3_detection_time: " << g3conflict_time << " usec" << endl;
cout << "m3_copy2_time: " << g3copy2_time << " usec" << endl;
printf("m3 conflict detection: %d\t", sum);
//for(int tmp = 0; tmp < grid; tmp++)
//cout<< h_total[tmp]<< "\t";
cout<<endl;
//fout<< g1conflict_time<< "\t";
//for(int tmp = 0; tmp < grid; tmp++)
//fout<< h_total[tmp]<< "\t";
//fout<< "\n";
free(h_total);
cudaFree(dev_filterSet);
cudaFree(d_total);
cudaDeviceReset();
// ################ Prepare and call host function ###################
hconflict_start = cpuSecond();
host_conflict_detect(db.filterSet_, db_size, total);
hconflict_time = cpuSecond() - hconflict_start;
cout.setf(ios::fixed);
cout << std::noshowpoint << "host_detection_time: " << hconflict_time << " usec" << endl;
printf("Host conflict detection: %d\n", total);
cout<<endl;
fout.open("0818_5128_fw5_5.txt", ios::app);
fout<< hconflict_time<< "\t";
fout<< g0conflict_time<< "\t";
//fout<< g1conflict_time<< "\t";
fout<< g2conflict_time<< "\t";
fout<< g3conflict_time<< "\n";
fout.close();
//free(cpy_dev);
//cudaFree(dev_filterSet);
//cudaFree(dev_total);
}
|
bbe1a5495826aa24f7e7d76ef065d624d75daabc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cutil.h>
#include <fcntl.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include "rotate.h"
#include "convert.h"
#include <cutil.h>
#include <assert.h>
#include "cuda_common.cuh"
#define uint unsigned int
#define IMUL(a, b) __mul24(a, b)
#define TEXTON32 1
#define TEXTON64 2
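// CPU reference: builds one summed-area table per histogram bin using the recurrence
// I(r,c) = I(r-1,c) + I(r,c-1) - I(r-1,c-1) + [pixel(r,c) == bin].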
int* computeGoldenIntegrals(int width, int height, int nbins, int* inputImage) {
/// integrals buffer layout: width * height * nbins
int* integrals = (int*)malloc(sizeof(int)*width*height*nbins);
memset(integrals, 0, sizeof(int) * width * height * nbins);
for(int bin = 0; bin < nbins; bin++) {
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
int integralValue = 0;
if (row == 0) {
if (col == 0) {
integralValue = ((inputImage[0] == bin) ? 1 : 0);
} else {
integralValue = integrals[(col - 1) * nbins + bin] + ((inputImage[col] == bin) ? 1 : 0);
}
} else {
if (col == 0) {
integralValue = integrals[((row - 1) * width) * nbins + bin] + ((inputImage[row * width] == bin) ? 1 : 0);
} else {
integralValue = integrals[((row - 1) * width + col) * nbins + bin] + integrals[(row * width + col - 1)*nbins + bin] - integrals[((row - 1) * width + col - 1) * nbins + bin] + ((inputImage[row * width + col] == bin) ? 1 : 0);
}
}
integrals[(row * width + col)*nbins + bin] = integralValue;
}
}
}
return integrals;
}
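// The branches above implement the standard 2D summed-area (integral image)
// recurrence, applied independently per histogram bin:
//   I(r, c, b) = I(r-1, c, b) + I(r, c-1, b) - I(r-1, c-1, b) + [image(r, c) == b]
// with out-of-range terms taken as zero, so I(r, c, b) counts the pixels of
// bin b inside the rectangle spanning (0, 0) .. (r, c).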
void checkIntegrals(int width, int height, int nbins, int* goldenIntegrals, int goldenIntegralPitch, int* suspectIntegrals, int suspectIntegralPitch) {
bool error = false;
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
for(int bin = 0; bin < nbins; bin++) {
if (goldenIntegrals[(row * width + col) * goldenIntegralPitch + bin] !=
suspectIntegrals[(row * width + col) * suspectIntegralPitch + bin]) {
printf("Error at: %d, %d, %d\n", row, col, bin);
error = true;
}
}
}
}
if (!error) {
printf("Integrals check out!\n");
}
}
__global__ void integrateBins(int width, int height, int nbins, int* devImage, int binPitch, int* devIntegrals) {
__shared__ int pixels[16];
const int blockX = blockDim.y * blockIdx.x;
const int threadX = threadIdx.y;
const int bin = threadIdx.x;
const int x = blockX + threadX;
if (x >= width) return;
if (bin > nbins) return;
int* imagePointer = devImage + x;
int* outputPointer = devIntegrals + binPitch * x + bin;
int accumulant = 0;
for(int y = 0; y < height; y++) {
if (bin == 0) {
pixels[threadX] = *imagePointer;
}
__syncthreads();
if (pixels[threadX] == bin) accumulant++;
*outputPointer = accumulant;
imagePointer += width;
outputPointer += width * binPitch;
}
}
__global__ void integrateBinsT(int width, int height, int nbins, int binPitch, int* devIntegrals) {
const int blockY = blockDim.y * blockIdx.x;
const int threadY = threadIdx.y;
const int bin = threadIdx.x;
const int y = blockY + threadY;
if (y >= height) return;
if (bin >= binPitch) return;
int* imagePointer = devIntegrals + binPitch * y * width + bin;
int accumulant = 0;
for(int x = 0; x < width; x++) {
accumulant += *imagePointer;
*imagePointer = accumulant;
imagePointer += binPitch;
}
}
/**
* For a given orientation, computes the integral images for each of the histogram bins
*/
void formIntegralImages(int width, int height, int nbins, int* devImage,
int binPitch, int* devIntegrals)
{
int pixelsPerCTA = 4;
dim3 gridDim = dim3((width - 1) / pixelsPerCTA + 1);
dim3 blockDim = dim3(nbins, pixelsPerCTA);
hipLaunchKernelGGL(( integrateBins), dim3(gridDim), dim3(blockDim), 0, 0, width, height, nbins, devImage, binPitch, devIntegrals);
gridDim = dim3((height - 1)/pixelsPerCTA + 1);
hipLaunchKernelGGL(( integrateBinsT), dim3(gridDim), dim3(blockDim), 0, 0, width, height, nbins, binPitch, devIntegrals);
}
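// A minimal sketch of one way to cross-check formIntegralImages against the CPU
// reference above; the helper name and buffer layout are assumptions (the golden
// buffer is packed at pitch nbins, the GPU buffer at pitch binPitch).
static void verifyIntegralImages(int width, int height, int nbins, int binPitch, int* hostImage) {
	int* devImage = NULL;
	int* devCheckIntegrals = NULL;
	hipMalloc((void**)&devImage, sizeof(int) * width * height);
	hipMalloc((void**)&devCheckIntegrals, sizeof(int) * binPitch * width * height);
	hipMemcpy(devImage, hostImage, sizeof(int) * width * height, hipMemcpyHostToDevice);
	// run the GPU integration (column pass + transposed row pass)
	formIntegralImages(width, height, nbins, devImage, binPitch, devCheckIntegrals);
	int* suspect = (int*)malloc(sizeof(int) * binPitch * width * height);
	hipMemcpy(suspect, devCheckIntegrals, sizeof(int) * binPitch * width * height, hipMemcpyDeviceToHost);
	int* golden = computeGoldenIntegrals(width, height, nbins, hostImage);
	checkIntegrals(width, height, nbins, golden, nbins, suspect, binPitch);
	free(golden);
	free(suspect);
	hipFree(devCheckIntegrals);
	hipFree(devImage);
}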
//
// float* getImage(uint width, uint height, float* devImage) {
// int imageSize = width * height * sizeof(float);
// float* result = (float*)malloc(imageSize);
// CUDA_SAFE_CALL(hipMemcpy(result, devImage, imageSize, hipMemcpyDeviceToHost));
// return result;
// }
//
// int* getImage(uint width, uint height, int* devImage) {
// int imageSize = width * height * sizeof(int);
// int* result = (int*)malloc(imageSize);
// CUDA_SAFE_CALL(hipMemcpy(result, devImage, imageSize, hipMemcpyDeviceToHost));
// return result;
// }
int findPitchInInts(int width) {
/* int* test; */
/* size_t pitch; */
/* hipMallocPitch((void**)&test, &pitch, width * sizeof(int), 1); */
/* hipFree(test); */
/* return pitch/sizeof(int); */
return ((width - 1)/16 + 1) * 16;
}
int pixelPitch;
int binPitch;
int border;
int width;
int height;
int borderWidth;
int borderHeight;
int* devQuantized;
int* devMirrored;
float* devGradientA;
float* devGradientB;
int* devTurned;
int* devImageT;
int* devIntegralCol;
int* devIntegralColT;
int* devIntegralsT;
int* devIntegrals;
float* devGradients;
uint norients;
uint nscale;
int initializeGradients(uint widthIn, uint heightIn, uint borderIn, uint maxbins, uint norientsIn, uint nscaleIn, uint textonChoice) {
width = widthIn;
height = heightIn;
border = borderIn;
norients = norientsIn;
nscale = nscaleIn;
borderWidth = width + 2 * border;
borderHeight = height + 2 * border;
CUDA_SAFE_CALL(hipMalloc((void**)&devGradients, sizeof(float) * norients * nscale * borderWidth * borderHeight));
CUDA_SAFE_CALL(hipMalloc((void**)&devQuantized, sizeof(int) * width * height));
CUDA_SAFE_CALL(hipMalloc((void**)&devMirrored, sizeof(int) * borderWidth * borderHeight));
CUDA_SAFE_CALL(hipMalloc((void**)&devGradientA, sizeof(float) * width * height));
CUDA_SAFE_CALL(hipMalloc((void**)&devGradientB, sizeof(float) * width * height));
int maxWidth = borderWidth + borderHeight;
int maxHeight = maxWidth;
int maxBins = 32;
if (textonChoice == TEXTON64)
maxBins = 64;
//pixelPitch = findPitchInInts(maxWidth * maxHeight);
binPitch = findPitchInInts(maxBins);
CUDA_SAFE_CALL(hipMalloc((void**)&devTurned, sizeof(int) * maxWidth * maxHeight));
CUDA_SAFE_CALL(hipMalloc((void**)&devIntegrals, sizeof(int) * binPitch * maxWidth * maxHeight));
return binPitch;
}
void finalizeGradients() {
CUDA_SAFE_CALL(hipFree(devIntegrals));
CUDA_SAFE_CALL(hipFree(devTurned));
CUDA_SAFE_CALL(hipFree(devGradientB));
CUDA_SAFE_CALL(hipFree(devGradientA));
CUDA_SAFE_CALL(hipFree(devMirrored));
CUDA_SAFE_CALL(hipFree(devQuantized));
CUDA_SAFE_CALL(hipFree(devGradients));
}
float* gradients(float* devImage, uint nbins, bool blur, float sigma, uint* radii, int textonChoice) {
quantizeImage(width, height, nbins, devImage, devQuantized);
mirrorImage(width, height, border, devQuantized, devMirrored);
for(int orientation = 0; orientation < norients/2; orientation++) {
float thetaPi = -float(orientation)/float(norients);
int newWidth;
int newHeight;
rotateImage(borderWidth, borderHeight, devMirrored, thetaPi, newWidth, newHeight, devTurned);
int* devTurnedImage = devTurned;
if (orientation == 0) {
devTurnedImage = devMirrored;
}
formIntegralImages(newWidth, newHeight, nbins, devTurnedImage, binPitch, devIntegrals);
for (int scale = 0; scale < nscale; scale++) {
if (TEXTON32 == textonChoice)
{
dispatchGradient(
false, width, height, border, nbins,
thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins),
devIntegrals, binPitch, devGradientA, devGradientB);
}
else
{
dispatchGradient_64(
width, height, border, nbins, thetaPi,
newWidth, radii[scale], blur, (int)(sigma*(float)nbins),
devIntegrals, binPitch, devGradientA, devGradientB);
}
mirrorImage(width, height, border, devGradientA,
&devGradients[borderWidth * borderHeight * (scale * norients + orientation + norients / 2)]);
mirrorImage(width, height, border, devGradientB,
&devGradients[borderWidth * borderHeight * (scale * norients + orientation)]);
}
}
return devGradients;
}
float* gradients(int* devImage, uint nbins, bool blur, float sigma, uint* radii, int textonChoice) {
mirrorImage(width, height, border, devImage, devMirrored);
for(int orientation = 0; orientation < norients/2; orientation++) {
float thetaPi = -float(orientation)/float(norients);
int newWidth;
int newHeight;
rotateImage(borderWidth, borderHeight, devMirrored, thetaPi, newWidth, newHeight, devTurned);
int* devTurnedImage = devTurned;
if (orientation == 0) {
devTurnedImage = devMirrored;
}
formIntegralImages(newWidth, newHeight, nbins, devTurnedImage, binPitch, devIntegrals);
for (int scale = 0; scale < nscale; scale++) {
if (TEXTON32 == textonChoice)
{
dispatchGradient(true, width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB);
}
else
{
dispatchGradient_64(width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB);
}
mirrorImage(width, height, border, devGradientA, &devGradients[borderWidth * borderHeight * (scale * norients + orientation + norients / 2)]);
mirrorImage(width, height, border, devGradientB, &devGradients[borderWidth * borderHeight * (scale * norients + orientation)]);
}
}
return devGradients;
}
|
bbe1a5495826aa24f7e7d76ef065d624d75daabc.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cutil.h>
#include <fcntl.h>
#include <unistd.h>
#include <cuda.h>
#include "rotate.h"
#include "convert.h"
#include <cutil.h>
#include <assert.h>
#include "cuda_common.cuh"
#define uint unsigned int
#define IMUL(a, b) __mul24(a, b)
#define TEXTON32 1
#define TEXTON64 2
int* computeGoldenIntegrals(int width, int height, int nbins, int* inputImage) {
/// The integral array has size width * height * nbins
int* integrals = (int*)malloc(sizeof(int)*width*height*nbins);
memset(integrals, 0, sizeof(int) * width * height * nbins);
for(int bin = 0; bin < nbins; bin++) {
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
int integralValue = 0;
if (row == 0) {
if (col == 0) {
integralValue = ((inputImage[0] == bin) ? 1 : 0);
} else {
integralValue = integrals[(col - 1) * nbins + bin] + ((inputImage[col] == bin) ? 1 : 0);
}
} else {
if (col == 0) {
integralValue = integrals[((row - 1) * width) * nbins + bin] + ((inputImage[row * width] == bin) ? 1 : 0);
} else {
integralValue = integrals[((row - 1) * width + col) * nbins + bin] + integrals[(row * width + col - 1)*nbins + bin] - integrals[((row - 1) * width + col - 1) * nbins + bin] + ((inputImage[row * width + col] == bin) ? 1 : 0);
}
}
integrals[(row * width + col)*nbins + bin] = integralValue;
}
}
}
return integrals;
}
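// The branches above implement the standard 2D summed-area (integral image)
// recurrence, applied independently per histogram bin:
//   I(r, c, b) = I(r-1, c, b) + I(r, c-1, b) - I(r-1, c-1, b) + [image(r, c) == b]
// with out-of-range terms taken as zero, so I(r, c, b) counts the pixels of
// bin b inside the rectangle spanning (0, 0) .. (r, c).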
void checkIntegrals(int width, int height, int nbins, int* goldenIntegrals, int goldenIntegralPitch, int* suspectIntegrals, int suspectIntegralPitch) {
bool error = false;
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
for(int bin = 0; bin < nbins; bin++) {
if (goldenIntegrals[(row * width + col) * goldenIntegralPitch + bin] !=
suspectIntegrals[(row * width + col) * suspectIntegralPitch + bin]) {
printf("Error at: %d, %d, %d\n", row, col, bin);
error = true;
}
}
}
}
if (!error) {
printf("Integrals check out!\n");
}
}
__global__ void integrateBins(int width, int height, int nbins, int* devImage, int binPitch, int* devIntegrals) {
__shared__ int pixels[16];
const int blockX = blockDim.y * blockIdx.x;
const int threadX = threadIdx.y;
const int bin = threadIdx.x;
const int x = blockX + threadX;
if (x >= width) return;
if (bin > nbins) return;
int* imagePointer = devImage + x;
int* outputPointer = devIntegrals + binPitch * x + bin;
int accumulant = 0;
for(int y = 0; y < height; y++) {
if (bin == 0) {
pixels[threadX] = *imagePointer;
}
__syncthreads();
if (pixels[threadX] == bin) accumulant++;
*outputPointer = accumulant;
imagePointer += width;
outputPointer += width * binPitch;
}
}
__global__ void integrateBinsT(int width, int height, int nbins, int binPitch, int* devIntegrals) {
const int blockY = blockDim.y * blockIdx.x;
const int threadY = threadIdx.y;
const int bin = threadIdx.x;
const int y = blockY + threadY;
if (y >= height) return;
if (bin >= binPitch) return;
int* imagePointer = devIntegrals + binPitch * y * width + bin;
int accumulant = 0;
for(int x = 0; x < width; x++) {
accumulant += *imagePointer;
*imagePointer = accumulant;
imagePointer += binPitch;
}
}
/**
* For a given orientation, computes the integral images for each of the histogram bins
*/
void formIntegralImages(int width, int height, int nbins, int* devImage,
int binPitch, int* devIntegrals)
{
int pixelsPerCTA = 4;
dim3 gridDim = dim3((width - 1) / pixelsPerCTA + 1);
dim3 blockDim = dim3(nbins, pixelsPerCTA);
integrateBins<<<gridDim, blockDim>>>(width, height, nbins, devImage, binPitch, devIntegrals);
gridDim = dim3((height - 1)/pixelsPerCTA + 1);
integrateBinsT<<<gridDim, blockDim>>>(width, height, nbins, binPitch, devIntegrals);
}
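// A minimal sketch of one way to cross-check formIntegralImages against the CPU
// reference above; the helper name and buffer layout are assumptions (the golden
// buffer is packed at pitch nbins, the GPU buffer at pitch binPitch).
static void verifyIntegralImages(int width, int height, int nbins, int binPitch, int* hostImage) {
	int* devImage = NULL;
	int* devCheckIntegrals = NULL;
	cudaMalloc((void**)&devImage, sizeof(int) * width * height);
	cudaMalloc((void**)&devCheckIntegrals, sizeof(int) * binPitch * width * height);
	cudaMemcpy(devImage, hostImage, sizeof(int) * width * height, cudaMemcpyHostToDevice);
	// run the GPU integration (column pass + transposed row pass)
	formIntegralImages(width, height, nbins, devImage, binPitch, devCheckIntegrals);
	int* suspect = (int*)malloc(sizeof(int) * binPitch * width * height);
	cudaMemcpy(suspect, devCheckIntegrals, sizeof(int) * binPitch * width * height, cudaMemcpyDeviceToHost);
	int* golden = computeGoldenIntegrals(width, height, nbins, hostImage);
	checkIntegrals(width, height, nbins, golden, nbins, suspect, binPitch);
	free(golden);
	free(suspect);
	cudaFree(devCheckIntegrals);
	cudaFree(devImage);
}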
//
// float* getImage(uint width, uint height, float* devImage) {
// int imageSize = width * height * sizeof(float);
// float* result = (float*)malloc(imageSize);
// CUDA_SAFE_CALL(cudaMemcpy(result, devImage, imageSize, cudaMemcpyDeviceToHost));
// return result;
// }
//
// int* getImage(uint width, uint height, int* devImage) {
// int imageSize = width * height * sizeof(int);
// int* result = (int*)malloc(imageSize);
// CUDA_SAFE_CALL(cudaMemcpy(result, devImage, imageSize, cudaMemcpyDeviceToHost));
// return result;
// }
int findPitchInInts(int width) {
/* int* test; */
/* size_t pitch; */
/* cudaMallocPitch((void**)&test, &pitch, width * sizeof(int), 1); */
/* cudaFree(test); */
/* return pitch/sizeof(int); */
return ((width - 1)/16 + 1) * 16;
}
int pixelPitch;
int binPitch;
int border;
int width;
int height;
int borderWidth;
int borderHeight;
int* devQuantized;
int* devMirrored;
float* devGradientA;
float* devGradientB;
int* devTurned;
int* devImageT;
int* devIntegralCol;
int* devIntegralColT;
int* devIntegralsT;
int* devIntegrals;
float* devGradients;
uint norients;
uint nscale;
int initializeGradients(uint widthIn, uint heightIn, uint borderIn, uint maxbins, uint norientsIn, uint nscaleIn, uint textonChoice) {
width = widthIn;
height = heightIn;
border = borderIn;
norients = norientsIn;
nscale = nscaleIn;
borderWidth = width + 2 * border;
borderHeight = height + 2 * border;
CUDA_SAFE_CALL(cudaMalloc((void**)&devGradients, sizeof(float) * norients * nscale * borderWidth * borderHeight));
CUDA_SAFE_CALL(cudaMalloc((void**)&devQuantized, sizeof(int) * width * height));
CUDA_SAFE_CALL(cudaMalloc((void**)&devMirrored, sizeof(int) * borderWidth * borderHeight));
CUDA_SAFE_CALL(cudaMalloc((void**)&devGradientA, sizeof(float) * width * height));
CUDA_SAFE_CALL(cudaMalloc((void**)&devGradientB, sizeof(float) * width * height));
int maxWidth = borderWidth + borderHeight;
int maxHeight = maxWidth;
int maxBins = 32;
if (textonChoice == TEXTON64)
maxBins = 64;
//pixelPitch = findPitchInInts(maxWidth * maxHeight);
binPitch = findPitchInInts(maxBins);
CUDA_SAFE_CALL(cudaMalloc((void**)&devTurned, sizeof(int) * maxWidth * maxHeight));
CUDA_SAFE_CALL(cudaMalloc((void**)&devIntegrals, sizeof(int) * binPitch * maxWidth * maxHeight));
return binPitch;
}
void finalizeGradients() {
CUDA_SAFE_CALL(cudaFree(devIntegrals));
CUDA_SAFE_CALL(cudaFree(devTurned));
CUDA_SAFE_CALL(cudaFree(devGradientB));
CUDA_SAFE_CALL(cudaFree(devGradientA));
CUDA_SAFE_CALL(cudaFree(devMirrored));
CUDA_SAFE_CALL(cudaFree(devQuantized));
CUDA_SAFE_CALL(cudaFree(devGradients));
}
float* gradients(float* devImage, uint nbins, bool blur, float sigma, uint* radii, int textonChoice) {
quantizeImage(width, height, nbins, devImage, devQuantized);
mirrorImage(width, height, border, devQuantized, devMirrored);
for(int orientation = 0; orientation < norients/2; orientation++) {
float thetaPi = -float(orientation)/float(norients);
int newWidth;
int newHeight;
rotateImage(borderWidth, borderHeight, devMirrored, thetaPi, newWidth, newHeight, devTurned);
int* devTurnedImage = devTurned;
if (orientation == 0) {
devTurnedImage = devMirrored;
}
formIntegralImages(newWidth, newHeight, nbins, devTurnedImage, binPitch, devIntegrals);
for (int scale = 0; scale < nscale; scale++) {
if (TEXTON32 == textonChoice)
{
dispatchGradient(
false, width, height, border, nbins,
thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins),
devIntegrals, binPitch, devGradientA, devGradientB);
}
else
{
dispatchGradient_64(
width, height, border, nbins, thetaPi,
newWidth, radii[scale], blur, (int)(sigma*(float)nbins),
devIntegrals, binPitch, devGradientA, devGradientB);
}
mirrorImage(width, height, border, devGradientA,
&devGradients[borderWidth * borderHeight * (scale * norients + orientation + norients / 2)]);
mirrorImage(width, height, border, devGradientB,
&devGradients[borderWidth * borderHeight * (scale * norients + orientation)]);
}
}
return devGradients;
}
float* gradients(int* devImage, uint nbins, bool blur, float sigma, uint* radii, int textonChoice) {
mirrorImage(width, height, border, devImage, devMirrored);
for(int orientation = 0; orientation < norients/2; orientation++) {
float thetaPi = -float(orientation)/float(norients);
int newWidth;
int newHeight;
rotateImage(borderWidth, borderHeight, devMirrored, thetaPi, newWidth, newHeight, devTurned);
int* devTurnedImage = devTurned;
if (orientation == 0) {
devTurnedImage = devMirrored;
}
formIntegralImages(newWidth, newHeight, nbins, devTurnedImage, binPitch, devIntegrals);
for (int scale = 0; scale < nscale; scale++) {
if (TEXTON32 == textonChoice)
{
dispatchGradient(true, width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB);
}
else
{
dispatchGradient_64(width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB);
}
mirrorImage(width, height, border, devGradientA, &devGradients[borderWidth * borderHeight * (scale * norients + orientation + norients / 2)]);
mirrorImage(width, height, border, devGradientB, &devGradients[borderWidth * borderHeight * (scale * norients + orientation)]);
}
}
return devGradients;
}
|
80a0f527fdd0ce068604fbddea8922cb32c86a2a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
#include <hip/driver_types.h>
#include <hip/hip_runtime_api.h>
#include "cuda8803ss.h"
static int
dumpresults(const uint32_t *res,unsigned count){
unsigned z,y,nonzero;
nonzero = 0;
for(z = 0 ; z < count ; z += 8){
for(y = 0 ; y < 8 ; ++y){
if(printf("%9x ",res[z + y]) < 0){
return -1;
}
if(res[z + y]){
++nonzero;
}
}
if(printf("\n") < 0){
return -1;
}
}
if(nonzero == 0){
fprintf(stderr," All-zero results. Kernel probably didn't run.\n");
return -1;
}
return 0;
}
// FIXME: we really ought to take a bus specification rather than a device number,
// since the latter are unsafe across hardware removal/additions.
static void
usage(const char *a0){
fprintf(stderr,"usage: %s devno addrmin addrmax\n",a0);
}
int main(int argc,char **argv){
uint32_t hostres[GRID_SIZE * BLOCK_SIZE],*resarr;
unsigned long long min,max;
unsigned unit = 4; // Minimum alignment of references
unsigned long zul;
hipDeviceptr_t ptr;
cudadump_e res;
hipCtx_t ctx;
char *eptr;
int cerr;
if(argc != 4){
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(((zul = strtoul(argv[1],&eptr,0)) == ULONG_MAX && errno == ERANGE)
|| eptr == argv[1] || *eptr){
fprintf(stderr,"Invalid device number: %s\n",argv[1]);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(((min = strtoull(argv[2],&eptr,0)) == ULLONG_MAX && errno == ERANGE)
|| eptr == argv[2] || *eptr){
fprintf(stderr,"Invalid minimum address: %s\n",argv[2]);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(((max = strtoull(argv[3],&eptr,0)) == ULLONG_MAX && errno == ERANGE)
|| eptr == argv[3] || *eptr){
fprintf(stderr,"Invalid maximum address: %s\n",argv[3]);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(max <= min){
fprintf(stderr,"Invalid arguments: max (%ju) <= min (%ju)\n",
max,min);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if((cerr = init_cuda_ctx(zul,&ctx)) != hipSuccess){
fprintf(stderr,"Error initializing CUDA device %lu (%d, %s?)\n",
zul,cerr,hipGetErrorString(hipGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(hipMalloc(&resarr,sizeof(hostres)) || hipMemset(resarr,0x00,sizeof(hostres))){
fprintf(stderr,"Error allocating %zu on device %lu (%s?)\n",
sizeof(hostres),zul,hipGetErrorString(hipGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(cuda_alloc_max(NULL,&ptr,sizeof(unsigned)) == 0){
fprintf(stderr,"Error allocating max on device %lu (%s?)\n",
zul,hipGetErrorString(hipGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if((res = dump_cuda(min,max,unit,resarr)) != CUDARANGER_EXIT_SUCCESS){
return res;
}
if(hipDeviceSynchronize()){
return res;
}
if(hipFree(ptr)){
fprintf(stderr,"Warning: couldn't free memory\n");
}
if(hipMemcpy(hostres,resarr,sizeof(hostres),hipMemcpyDeviceToHost)){
fprintf(stderr,"Error copying %zu from device %lu (%s?)\n",
sizeof(hostres),zul,hipGetErrorString(hipGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(hipFree(resarr)){
fprintf(stderr,"Couldn't free %zu on device %lu (%s?)\n",
sizeof(hostres),zul,hipGetErrorString(hipGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(dumpresults(hostres,sizeof(hostres) / sizeof(*hostres))){
return CUDARANGER_EXIT_ERROR;
}
return CUDARANGER_EXIT_SUCCESS;
}
|
80a0f527fdd0ce068604fbddea8922cb32c86a2a.cu
|
#include <cuda.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
#include <driver_types.h>
#include <cuda_runtime_api.h>
#include "cuda8803ss.h"
static int
dumpresults(const uint32_t *res,unsigned count){
unsigned z,y,nonzero;
nonzero = 0;
for(z = 0 ; z < count ; z += 8){
for(y = 0 ; y < 8 ; ++y){
if(printf("%9x ",res[z + y]) < 0){
return -1;
}
if(res[z + y]){
++nonzero;
}
}
if(printf("\n") < 0){
return -1;
}
}
if(nonzero == 0){
fprintf(stderr," All-zero results. Kernel probably didn't run.\n");
return -1;
}
return 0;
}
// FIXME: we really ought to take a bus specification rather than a device number,
// since the latter are unsafe across hardware removal/additions.
static void
usage(const char *a0){
fprintf(stderr,"usage: %s devno addrmin addrmax\n",a0);
}
int main(int argc,char **argv){
uint32_t hostres[GRID_SIZE * BLOCK_SIZE],*resarr;
unsigned long long min,max;
unsigned unit = 4; // Minimum alignment of references
unsigned long zul;
CUdeviceptr ptr;
cudadump_e res;
CUcontext ctx;
char *eptr;
int cerr;
if(argc != 4){
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(((zul = strtoul(argv[1],&eptr,0)) == ULONG_MAX && errno == ERANGE)
|| eptr == argv[1] || *eptr){
fprintf(stderr,"Invalid device number: %s\n",argv[1]);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(((min = strtoull(argv[2],&eptr,0)) == ULLONG_MAX && errno == ERANGE)
|| eptr == argv[2] || *eptr){
fprintf(stderr,"Invalid minimum address: %s\n",argv[2]);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(((max = strtoull(argv[3],&eptr,0)) == ULLONG_MAX && errno == ERANGE)
|| eptr == argv[3] || *eptr){
fprintf(stderr,"Invalid maximum address: %s\n",argv[3]);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if(max <= min){
fprintf(stderr,"Invalid arguments: max (%ju) <= min (%ju)\n",
max,min);
usage(*argv);
return CUDARANGER_EXIT_ERROR;
}
if((cerr = init_cuda_ctx(zul,&ctx)) != CUDA_SUCCESS){
fprintf(stderr,"Error initializing CUDA device %lu (%d, %s?)\n",
zul,cerr,cudaGetErrorString(cudaGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(cudaMalloc(&resarr,sizeof(hostres)) || cudaMemset(resarr,0x00,sizeof(hostres))){
fprintf(stderr,"Error allocating %zu on device %lu (%s?)\n",
sizeof(hostres),zul,cudaGetErrorString(cudaGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(cuda_alloc_max(NULL,&ptr,sizeof(unsigned)) == 0){
fprintf(stderr,"Error allocating max on device %lu (%s?)\n",
zul,cudaGetErrorString(cudaGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if((res = dump_cuda(min,max,unit,resarr)) != CUDARANGER_EXIT_SUCCESS){
return res;
}
if(cudaThreadSynchronize()){
return res;
}
if(cuMemFree(ptr)){
fprintf(stderr,"Warning: couldn't free memory\n");
}
if(cudaMemcpy(hostres,resarr,sizeof(hostres),cudaMemcpyDeviceToHost)){
fprintf(stderr,"Error copying %zu from device %lu (%s?)\n",
sizeof(hostres),zul,cudaGetErrorString(cudaGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(cudaFree(resarr)){
fprintf(stderr,"Couldn't free %zu on device %lu (%s?)\n",
sizeof(hostres),zul,cudaGetErrorString(cudaGetLastError()));
return CUDARANGER_EXIT_ERROR;
}
if(dumpresults(hostres,sizeof(hostres) / sizeof(*hostres))){
return CUDARANGER_EXIT_ERROR;
}
return CUDARANGER_EXIT_SUCCESS;
}
|
7f0f012081c3737a6312743c5a5c5e0f5868e9dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <unistd.h>
#include <iostream>
#include <string>
#include <sstream>
using namespace std;
#include "hip/hip_runtime_api.h"
#define SIZE_OF_MATRIX 1000
#define SIZE_OF_BLOCK 16
#define M SIZE_OF_MATRIX
unsigned int m = SIZE_OF_MATRIX;
#define idx(i,j,lda) ((j) + ((i)*(lda)))
__global__ void multiply_matrices(float *d_a, float *d_b, float *d_c, int lda)
{
unsigned int row = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int col = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int id = idx(row,col,lda);
float ctemp = 0.0;
if (row < M && col < M)
{
for (unsigned int j=0; j<M; j++)
{
ctemp = ctemp + d_a[idx(row,j,lda)] * d_b[idx(j,col,lda)];
}
d_c[id] = ctemp;
}
}
__global__ void multiply_matrices_shared_blocks(float *d_a, float *d_b, float *d_c,
int lda)
{
int bs = SIZE_OF_BLOCK;
unsigned int row = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int col = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int id = idx(row,col,lda);
//submatrices
float *sub_a, *sub_b;
//shared submatrices
__shared__ float a[SIZE_OF_BLOCK][SIZE_OF_BLOCK], b[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
//temp element of d_c
float c = 0;
//top-level row,col of block
int block_row = blockIdx.y * bs;
int block_col = blockIdx.x * bs;
//id inside each block
int sub_row = threadIdx.y;
int sub_col = threadIdx.x;
//for each block
for (int k = 0; k < (M / bs); k++)
{
sub_a = &d_a[idx(block_row, bs*k, lda)];
sub_b = &d_b[idx(bs*k, block_col, lda)];
a[sub_row][sub_col] = sub_a[idx(sub_row, sub_col, lda)];
b[sub_row][sub_col] = sub_b[idx(sub_row, sub_col, lda)];
//wait for all threads to complete copy to shared memory.
__syncthreads();
//multiply each submatrix
for (int j=0; j < bs; j++)
{
c = c + a[sub_row][j] * b[j][sub_col];
}
// move results to device memory.
d_c[id] = c;
// wait for multiplication to finish before moving onto the next submatrix.
__syncthreads();
}
}
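// Worked example for the tiling above: with SIZE_OF_MATRIX = 1000 and
// SIZE_OF_BLOCK = 16, the host code launches a 63x63 grid of 16x16 thread
// blocks (ceil(1000 / 16) = 63), and each thread in this kernel walks
// M / bs = 62 shared-memory tiles while accumulating one output element.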
void multiply_by_element(dim3 grid, dim3 threads, float *d_a, float *d_b, float *d_c, int m, hipStream_t cStream)
{
hipError_t err;
unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
float* c = (float*)malloc(matsize);
hipLaunchKernelGGL(( multiply_matrices), dim3(grid), dim3(threads), 0, cStream , d_a, d_b, d_c, m);
err = hipGetLastError();
if (err != hipSuccess)
{
cout << "error in kernel, " << hipGetErrorString(err) << endl;
}
hipStreamSynchronize(cStream);
err = hipMemcpyAsync(c, d_c, matsize, hipMemcpyDeviceToHost, cStream);
if (err != hipSuccess)
{
cout << "error in memcpy, #=" << hipGetErrorString(err) << endl;
}
}
void multiply_by_block(dim3 grid, dim3 threads, float *d_a, float *d_b, float *d_c, int m, hipStream_t cStream)
{
hipError_t err;
unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
float* c = (float*)malloc(matsize);
hipLaunchKernelGGL(( multiply_matrices_shared_blocks), dim3(grid), dim3(threads), 0, cStream , d_a, d_b, d_c, m);
err = hipGetLastError();
if (err != hipSuccess)
{
cout << "error in kernel, " << hipGetErrorString(err) << endl;
}
hipStreamSynchronize(cStream);
err = hipMemcpyAsync(c, d_c, matsize, hipMemcpyDeviceToHost, cStream);
if (err != hipSuccess)
{
cout << "error in memcpy, #=" << hipGetErrorString(err) << endl;
}
}
int main(int argc, char** argv)
{
unsigned int number_of_threads = min(SIZE_OF_MATRIX, SIZE_OF_BLOCK);
unsigned int number_of_blocks;
if (SIZE_OF_MATRIX > SIZE_OF_BLOCK)
number_of_blocks = ceil(SIZE_OF_MATRIX / ((float) SIZE_OF_BLOCK));
else
number_of_blocks = 1;
unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
//cout << "blocks: " << number_of_blocks << " threads: " <<
//number_of_threads << endl;
//cout.flush();
float* a = (float*)malloc(matsize);
float* b = (float*)malloc(matsize);
float* c = (float*)malloc(matsize);
	//initialize matrices
for (int i=0; i<m; i++) {
for (int j=0; j<m; j++) {
//a[i*m+j] = i;
//b[i*m+j] = i;
a[i*m+j] = i-j*2 + i-j+1 + 1;
b[i*m+j] = i-j*2 + i-j+1 + 1;
c[i*m+j] = 0;
//cout << a[i*m+j] << ", ";
}
//cout << endl;
}
hipError_t err;
int count = 0;
err = hipGetDeviceCount(&count);
cout << count << " devices found." << endl;
string device_list("");
int number_of_iterations = 1;
int opt = getopt(argc, argv, "d:i:");
while(opt != -1) {
stringstream str;
switch(opt) {
case 'd':
device_list = string(optarg);
break;
case 'i':
str << optarg;
str >> number_of_iterations;
break;
case '?':
if (optopt == 'd')
cerr << "Error, option -d requires argument: comma delimted list of devices to run on." << endl;
else if (optopt == 'i')
cerr << "Error, option -i requires argument: number of iterations to run." << endl;
else
cerr << "Error, unknow option. Usage:\nmatmult [-d <device id>,...] [-i <number of iterations]" << endl;
return 1;
default:
break;
}
opt = getopt(argc, argv, "d:i:");
}
int devices[count];
int nDevices = 0;
//default: use all the devices
if (device_list.compare("") == 0)
{
for (int d=0;d<count;d++)
{
devices[d] = d;
}
nDevices = count;
}
else
{
for (int d=0;d<count;d++)
{
stringstream str;
str << d;
char c = 0;
if (str >> c) {
if (device_list.find(c) != string::npos) {
devices[nDevices++] = d;
}
}
}
}
//cout << "finnished mapping devices." << endl;
float *d_a[nDevices], *d_b[nDevices], *d_c[nDevices];
hipStream_t streams[nDevices];
for (int d=0;d<nDevices;d++)
{
hipSetDevice(devices[d]);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, devices[d]);
cout << "Using device " << devices[d] << ", name: " << deviceProp.name << endl;
err = hipSetDevice(devices[d]);
if (err != hipSuccess)
{
cout << "error setting device, #=" << hipGetErrorString(err) << endl;
}
err = hipStreamCreate(&streams[d]);
if (err != hipSuccess)
{
cout << "error in stream creation, #=" << hipGetErrorString(err) << endl;
}
err = hipMalloc((void **) &d_a[d], matsize);
if (err != hipSuccess)
{
cout << "error in malloc, #=" << hipGetErrorString(err) << endl;
}
err = hipMalloc((void **) &d_b[d], matsize);
if (err != hipSuccess)
{
cout << "error in malloc, #=" << hipGetErrorString(err) << endl;
}
err = hipMalloc((void **) &d_c[d], matsize);
if (err != hipSuccess)
{
cout << "error in malloc, #=" << hipGetErrorString(err) << endl;
}
}
for (int i=0; i<number_of_iterations*nDevices; i++)
{
int cDevice = i%nDevices;
hipStream_t cStream = streams[cDevice];
		err = hipSetDevice(devices[cDevice]);
if (err != hipSuccess)
{
cout << "error setting device: " << devices[i%nDevices] << " #=" << hipGetErrorString(err) << endl;
}
err = hipMemcpyAsync(d_a[cDevice], a, matsize, hipMemcpyHostToDevice, cStream);
if (err != hipSuccess)
{
cout << "error in memcpy, #=" << hipGetErrorString(err) << endl;
}
err = hipMemcpyAsync(d_b[cDevice], b, matsize, hipMemcpyHostToDevice, cStream);
if (err != hipSuccess)
{
cout << "error in memcpy, #=" << hipGetErrorString(err) << endl;
}
//cout << "running on device " << cDevice << endl;
dim3 grid(number_of_blocks, number_of_blocks);
dim3 threads(number_of_threads, number_of_threads, 1);
//multiply each element at a time.
multiply_by_element(grid, threads, d_a[cDevice], d_b[cDevice], d_c[cDevice], m, cStream);
//multiply by first load a 16x16 submatrix into shared memory.
multiply_by_block(grid, threads, d_a[cDevice], d_b[cDevice], d_c[cDevice], m, cStream);
}
cout << "Finished " << number_of_iterations << " iterations on " << nDevices << " devices." << endl;
for (int d=0;d<nDevices;d++)
{
hipSetDevice(devices[d]);
hipStreamSynchronize(streams[d]);
}
for (int d=0;d<nDevices;d++)
{
hipStreamDestroy(streams[d]);
}
//print c
/*
cout << " results: " << endl;
for (int i=0; i<m; i++) {
for (int j=0; j<m; j++) {
cout << c[i*m+j] << ", ";
}
cout << endl;
}
*/
//print c
/*
cout << " results: " << endl;
for (int i=0; i<m; i++) {
for (int j=0; j<m; j++) {
cout << c[i*m+j] << ", ";
}
cout << endl;
}
*/
	// free the per-device buffers; d_a, d_b and d_c are host arrays of device pointers
	for (int d=0;d<nDevices;d++)
	{
		hipSetDevice(devices[d]);
		hipFree(d_a[d]);
		hipFree(d_b[d]);
		hipFree(d_c[d]);
	}
hipDeviceReset();
}
|
7f0f012081c3737a6312743c5a5c5e0f5868e9dd.cu
|
#include <stdlib.h>
#include <unistd.h>
#include <iostream>
#include <string>
#include <sstream>
using namespace std;
#include "cuda_runtime_api.h"
#define SIZE_OF_MATRIX 1000
#define SIZE_OF_BLOCK 16
#define M SIZE_OF_MATRIX
unsigned int m = SIZE_OF_MATRIX;
#define idx(i,j,lda) ((j) + ((i)*(lda)))
__global__ void multiply_matrices(float *d_a, float *d_b, float *d_c, int lda)
{
unsigned int row = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int col = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int id = idx(row,col,lda);
float ctemp = 0.0;
if (row < M && col < M)
{
for (unsigned int j=0; j<M; j++)
{
ctemp = ctemp + d_a[idx(row,j,lda)] * d_b[idx(j,col,lda)];
}
d_c[id] = ctemp;
}
}
__global__ void multiply_matrices_shared_blocks(float *d_a, float *d_b, float *d_c,
int lda)
{
int bs = SIZE_OF_BLOCK;
unsigned int row = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int col = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int id = idx(row,col,lda);
//submatrices
float *sub_a, *sub_b;
//shared submatrices
__shared__ float a[SIZE_OF_BLOCK][SIZE_OF_BLOCK], b[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
//temp element of d_c
float c = 0;
//top-level row,col of block
int block_row = blockIdx.y * bs;
int block_col = blockIdx.x * bs;
//id inside each block
int sub_row = threadIdx.y;
int sub_col = threadIdx.x;
//for each block
for (int k = 0; k < (M / bs); k++)
{
sub_a = &d_a[idx(block_row, bs*k, lda)];
sub_b = &d_b[idx(bs*k, block_col, lda)];
a[sub_row][sub_col] = sub_a[idx(sub_row, sub_col, lda)];
b[sub_row][sub_col] = sub_b[idx(sub_row, sub_col, lda)];
//wait for all threads to complete copy to shared memory.
__syncthreads();
//multiply each submatrix
for (int j=0; j < bs; j++)
{
c = c + a[sub_row][j] * b[j][sub_col];
}
// move results to device memory.
d_c[id] = c;
// wait for multiplication to finish before moving onto the next submatrix.
__syncthreads();
}
}
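// Worked example for the tiling above: with SIZE_OF_MATRIX = 1000 and
// SIZE_OF_BLOCK = 16, the host code launches a 63x63 grid of 16x16 thread
// blocks (ceil(1000 / 16) = 63), and each thread in this kernel walks
// M / bs = 62 shared-memory tiles while accumulating one output element.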
void multiply_by_element(dim3 grid, dim3 threads, float *d_a, float *d_b, float *d_c, int m, cudaStream_t cStream)
{
cudaError err;
unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
float* c = (float*)malloc(matsize);
multiply_matrices<<< grid, threads, 0, cStream >>>(d_a, d_b, d_c, m);
err = cudaGetLastError();
if (err != cudaSuccess)
{
cout << "error in kernel, " << cudaGetErrorString(err) << endl;
}
cudaStreamSynchronize(cStream);
err = cudaMemcpyAsync(c, d_c, matsize, cudaMemcpyDeviceToHost, cStream);
if (err != cudaSuccess)
{
cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
}
}
void multiply_by_block(dim3 grid, dim3 threads, float *d_a, float *d_b, float *d_c, int m, cudaStream_t cStream)
{
cudaError err;
unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
float* c = (float*)malloc(matsize);
multiply_matrices_shared_blocks<<< grid, threads, 0, cStream >>>(d_a, d_b, d_c, m);
err = cudaGetLastError();
if (err != cudaSuccess)
{
cout << "error in kernel, " << cudaGetErrorString(err) << endl;
}
cudaStreamSynchronize(cStream);
err = cudaMemcpyAsync(c, d_c, matsize, cudaMemcpyDeviceToHost, cStream);
if (err != cudaSuccess)
{
cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
}
}
int main(int argc, char** argv)
{
unsigned int number_of_threads = min(SIZE_OF_MATRIX, SIZE_OF_BLOCK);
unsigned int number_of_blocks;
if (SIZE_OF_MATRIX > SIZE_OF_BLOCK)
number_of_blocks = ceil(SIZE_OF_MATRIX / ((float) SIZE_OF_BLOCK));
else
number_of_blocks = 1;
unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
//cout << "blocks: " << number_of_blocks << " threads: " <<
//number_of_threads << endl;
//cout.flush();
float* a = (float*)malloc(matsize);
float* b = (float*)malloc(matsize);
float* c = (float*)malloc(matsize);
	//initialize matrices
for (int i=0; i<m; i++) {
for (int j=0; j<m; j++) {
//a[i*m+j] = i;
//b[i*m+j] = i;
a[i*m+j] = i-j*2 + i-j+1 + 1;
b[i*m+j] = i-j*2 + i-j+1 + 1;
c[i*m+j] = 0;
//cout << a[i*m+j] << ", ";
}
//cout << endl;
}
cudaError_t err;
int count = 0;
err = cudaGetDeviceCount(&count);
cout << count << " devices found." << endl;
string device_list("");
int number_of_iterations = 1;
int opt = getopt(argc, argv, "d:i:");
while(opt != -1) {
stringstream str;
switch(opt) {
case 'd':
device_list = string(optarg);
break;
case 'i':
str << optarg;
str >> number_of_iterations;
break;
case '?':
if (optopt == 'd')
cerr << "Error, option -d requires argument: comma delimted list of devices to run on." << endl;
else if (optopt == 'i')
cerr << "Error, option -i requires argument: number of iterations to run." << endl;
else
cerr << "Error, unknow option. Usage:\nmatmult [-d <device id>,...] [-i <number of iterations]" << endl;
return 1;
default:
break;
}
opt = getopt(argc, argv, "d:i:");
}
int devices[count];
int nDevices = 0;
//default: use all the devices
if (device_list.compare("") == 0)
{
for (int d=0;d<count;d++)
{
devices[d] = d;
}
nDevices = count;
}
else
{
for (int d=0;d<count;d++)
{
stringstream str;
str << d;
char c = 0;
if (str >> c) {
if (device_list.find(c) != string::npos) {
devices[nDevices++] = d;
}
}
}
}
//cout << "finnished mapping devices." << endl;
float *d_a[nDevices], *d_b[nDevices], *d_c[nDevices];
cudaStream_t streams[nDevices];
for (int d=0;d<nDevices;d++)
{
cudaSetDevice(devices[d]);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, devices[d]);
cout << "Using device " << devices[d] << ", name: " << deviceProp.name << endl;
err = cudaSetDevice(devices[d]);
if (err != cudaSuccess)
{
cout << "error setting device, #=" << cudaGetErrorString(err) << endl;
}
err = cudaStreamCreate(&streams[d]);
if (err != cudaSuccess)
{
cout << "error in stream creation, #=" << cudaGetErrorString(err) << endl;
}
err = cudaMalloc((void **) &d_a[d], matsize);
if (err != cudaSuccess)
{
cout << "error in malloc, #=" << cudaGetErrorString(err) << endl;
}
err = cudaMalloc((void **) &d_b[d], matsize);
if (err != cudaSuccess)
{
cout << "error in malloc, #=" << cudaGetErrorString(err) << endl;
}
err = cudaMalloc((void **) &d_c[d], matsize);
if (err != cudaSuccess)
{
cout << "error in malloc, #=" << cudaGetErrorString(err) << endl;
}
}
for (int i=0; i<number_of_iterations*nDevices; i++)
{
int cDevice = i%nDevices;
cudaStream_t cStream = streams[cDevice];
		err = cudaSetDevice(devices[cDevice]);
if (err != cudaSuccess)
{
cout << "error setting device: " << devices[i%nDevices] << " #=" << cudaGetErrorString(err) << endl;
}
err = cudaMemcpyAsync(d_a[cDevice], a, matsize, cudaMemcpyHostToDevice, cStream);
if (err != cudaSuccess)
{
cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
}
err = cudaMemcpyAsync(d_b[cDevice], b, matsize, cudaMemcpyHostToDevice, cStream);
if (err != cudaSuccess)
{
cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
}
//cout << "running on device " << cDevice << endl;
dim3 grid(number_of_blocks, number_of_blocks);
dim3 threads(number_of_threads, number_of_threads, 1);
//multiply each element at a time.
multiply_by_element(grid, threads, d_a[cDevice], d_b[cDevice], d_c[cDevice], m, cStream);
//multiply by first load a 16x16 submatrix into shared memory.
multiply_by_block(grid, threads, d_a[cDevice], d_b[cDevice], d_c[cDevice], m, cStream);
}
cout << "Finished " << number_of_iterations << " iterations on " << nDevices << " devices." << endl;
for (int d=0;d<nDevices;d++)
{
cudaSetDevice(devices[d]);
cudaStreamSynchronize(streams[d]);
}
for (int d=0;d<nDevices;d++)
{
cudaStreamDestroy(streams[d]);
}
//print c
/*
cout << " results: " << endl;
for (int i=0; i<m; i++) {
for (int j=0; j<m; j++) {
cout << c[i*m+j] << ", ";
}
cout << endl;
}
*/
//print c
/*
cout << " results: " << endl;
for (int i=0; i<m; i++) {
for (int j=0; j<m; j++) {
cout << c[i*m+j] << ", ";
}
cout << endl;
}
*/
	// free the per-device buffers; d_a, d_b and d_c are host arrays of device pointers
	for (int d=0;d<nDevices;d++)
	{
		cudaSetDevice(devices[d]);
		cudaFree(d_a[d]);
		cudaFree(d_b[d]);
		cudaFree(d_c[d]);
	}
cudaThreadExit();
}
|